/*
 * pdcspath_hwpath_write - Change the hardware path of a PDC stable entry.
 * @entry: An allocated and populated pdcspath_entry struct.
 * @buf: The input buffer to read from ('/'-delimited hardware path).
 * @count: The number of bytes to be read.
 *
 * Parses a '/'-delimited hardware path, validates that it maps to an
 * existing device, writes it back to PDC stable storage, and refreshes
 * the sysfs "device" symlink.  Returns the number of bytes consumed on
 * success, negative errno otherwise.
 */
static ssize_t pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t count)
{
	struct hardware_path hwpath;
	unsigned short i;
	char in[64], *temp;
	struct device *dev;
	int ret;

	if (!entry || !buf || !count)
		return -EINVAL;

	/* We'll use a bounded local copy of buf: a fixed-size buffer replaces
	 * the previous unbounded, user-controlled VLA on the kernel stack.
	 * Hardware paths are short, so 63 characters is plenty. */
	count = min_t(size_t, count, sizeof(in)-1);
	strncpy(in, buf, count);
	in[count] = '\0';

	/* Blank out the target; 0xff is the "unused field" pattern. */
	memset(&hwpath, 0xff, sizeof(hwpath));

	/* First, pick the mod field (the last one of the input string). */
	if (!(temp = strrchr(in, '/')))
		return -EINVAL;

	hwpath.mod = simple_strtoul(temp+1, NULL, 10);
	in[temp-in] = '\0';	/* truncate the remaining string, just precaution */
	DPRINTK("%s: mod: %d\n", __func__, hwpath.mod);

	/* Walk the remaining '/'-separated fields right-to-left into bc[].
	 * We stop before writing the last field; too many fields will be
	 * caught later when the hwpath consistency is checked. */
	for (i=5; ((temp = strrchr(in, '/'))) && (temp-in > 0) && (likely(i)); i--) {
		hwpath.bc[i] = simple_strtoul(temp+1, NULL, 10);
		in[temp-in] = '\0';
		DPRINTK("%s: bc[%d]: %d\n", __func__, i, hwpath.bc[i]);
	}

	/* Store the final field. */
	hwpath.bc[i] = simple_strtoul(in, NULL, 10);
	DPRINTK("%s: bc[%d]: %d\n", __func__, i, hwpath.bc[i]);

	/* Make sure the path actually maps to an existing device. */
	if (!(dev = hwpath_to_device((struct hardware_path *)&hwpath))) {
		printk(KERN_WARNING "%s: attempt to set invalid \"%s\" "
			"hardware path: %s\n", __func__, entry->name, buf);
		return -EINVAL;
	}

	/* So far so good, let's get in deep. */
	write_lock(&entry->rw_lock);
	entry->ready = 0;
	entry->dev = dev;

	/* Now, dive in. Write back to the hardware. */
	pdcspath_store(entry);

	/* Update the symlink to the real device. */
	sysfs_remove_link(&entry->kobj, "device");
	ret = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device");
	WARN_ON(ret);

	write_unlock(&entry->rw_lock);

	printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" path to \"%s\"\n",
		entry->name, buf);

	return count;
}
/*
 * ncp_file_read - read data from an NCP (NetWare) regular file.
 * @file: open file to read from
 * @buf: userspace destination buffer
 * @count: maximum number of bytes to read
 * @ppos: file position, updated on return
 *
 * Reads in server-buffer-sized chunks through a vmalloc'ed bounce
 * buffer.  Returns the number of bytes read, or a negative errno when
 * nothing at all could be read.
 */
static ssize_t
ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;
	size_t already_read = 0;
	off_t pos;
	size_t bufsize;
	int error;
	void* freepage;
	size_t freelen;

	DPRINTK("ncp_file_read: enter %s/%s\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);

	/* Bail out early if the server connection has gone away. */
	if (!ncp_conn_valid(NCP_SERVER(inode)))
		return -EIO;
	if (!S_ISREG(inode->i_mode)) {
		DPRINTK("ncp_file_read: read from non-file, mode %07o\n",
			inode->i_mode);
		return -EINVAL;
	}
	pos = *ppos;

	if ((ssize_t) count < 0) {
		return -EINVAL;
	}
	if (!count)
		return 0;
	if (pos > inode->i_sb->s_maxbytes)
		return 0;
	/* Clamp the request so we never read past s_maxbytes. */
	if (pos + count > inode->i_sb->s_maxbytes) {
		count = inode->i_sb->s_maxbytes - pos;
	}

	error = ncp_make_open(inode, O_RDONLY);
	if (error) {
		DPRINTK(KERN_ERR "ncp_file_read: open failed, error=%d\n", error);
		return error;
	}

	bufsize = NCP_SERVER(inode)->buffer_size;

	/* If the bounce-buffer allocation fails we fall straight to outrel
	 * with error == -EIO. */
	error = -EIO;
	freelen = ncp_read_bounce_size(bufsize);
	freepage = vmalloc(freelen);
	if (!freepage)
		goto outrel;
	error = 0;
	/* First read in as much as possible for each bufsize. */
	while (already_read < count) {
		int read_this_time;
		/* Never cross a server buffer boundary in a single request. */
		size_t to_read = min_t(unsigned int,
				       bufsize - (pos % bufsize),
				       count - already_read);

		error = ncp_read_bounce(NCP_SERVER(inode),
				NCP_FINFO(inode)->file_handle,
				pos, to_read, buf, &read_this_time,
				freepage, freelen);
		if (error) {
			error = -EIO;	/* NW errno -> Linux errno */
			break;
		}
		pos += read_this_time;
		buf += read_this_time;
		already_read += read_this_time;

		/* Short read: the server has no more data for us. */
		if (read_this_time != to_read) {
			break;
		}
	}
	vfree(freepage);

	*ppos = pos;

	if (!IS_RDONLY(inode)) {
		inode->i_atime = CURRENT_TIME;
	}

	DPRINTK("ncp_file_read: exit %s/%s\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);
	/* Success path deliberately falls through into outrel. */
outrel:
	/* Drop the open reference taken by ncp_make_open(). */
	ncp_inode_close(inode);
	return already_read ? already_read : error;
}
/*
 * ncp_release - file release callback; closes the NCP file handle.
 * Failure to close is only logged, never propagated to the VFS.
 */
static int ncp_release(struct inode *inode, struct file *file)
{
	int err = ncp_make_closed(inode);

	if (err)
		DPRINTK("ncp_release: failed to close\n");

	return 0;
}
/*
 * parport_read - read a block of data from a parport peripheral.
 * @port: port to read from
 * @buffer: destination buffer
 * @len: maximum number of bytes to read
 *
 * Dispatches to the reverse-transfer routine matching the port's
 * current IEEE 1284 mode.  Returns the number of bytes read, or a
 * negative errno.
 */
ssize_t parport_read (struct parport *port, void *buffer, size_t len)
{
#ifndef CONFIG_PARPORT_1284
	printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
	return -ENODEV;
#else
	int mode = port->physport->ieee1284.mode;
	int addr = mode & IEEE1284_ADDR;
	size_t (*fn) (struct parport *, void *, size_t, int);

	/* Ignore the device-ID-request bit and the address bit. */
	mode &= ~(IEEE1284_DEVICEID | IEEE1284_ADDR);

	/* Use the mode we're in. */
	switch (mode) {
	case IEEE1284_MODE_COMPAT:
		/* if we can tri-state use BYTE mode instead of NIBBLE mode,
		 * if that fails, revert to NIBBLE mode -- ought to store somewhere
		 * the device's ability to do BYTE mode reverse transfers, so we don't
		 * end up needlessly calling negotiate(BYTE) repeately.. (fb)
		 */
		if ((port->physport->modes & PARPORT_MODE_TRISTATE) &&
		    !parport_negotiate (port, IEEE1284_MODE_BYTE)) {
			/* got into BYTE mode OK */
			DPRINTK (KERN_DEBUG "%s: Using byte mode\n", port->name);
			fn = port->ops->byte_read_data;
			break;
		}
		if (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) {
			return -EIO;
		}
		/* fall through to NIBBLE */
	case IEEE1284_MODE_NIBBLE:
		DPRINTK (KERN_DEBUG "%s: Using nibble mode\n", port->name);
		fn = port->ops->nibble_read_data;
		break;

	case IEEE1284_MODE_BYTE:
		DPRINTK (KERN_DEBUG "%s: Using byte mode\n", port->name);
		fn = port->ops->byte_read_data;
		break;

	case IEEE1284_MODE_EPP:
		DPRINTK (KERN_DEBUG "%s: Using EPP mode\n", port->name);
		/* The address bit selects address cycles vs data cycles. */
		if (addr) {
			fn = port->ops->epp_read_addr;
		} else {
			fn = port->ops->epp_read_data;
		}
		break;
	case IEEE1284_MODE_EPPSWE:
		DPRINTK (KERN_DEBUG "%s: Using software-emulated EPP mode\n",
			 port->name);
		if (addr) {
			fn = parport_ieee1284_epp_read_addr;
		} else {
			fn = parport_ieee1284_epp_read_data;
		}
		break;
	case IEEE1284_MODE_ECP:
	case IEEE1284_MODE_ECPRLE:
		DPRINTK (KERN_DEBUG "%s: Using ECP mode\n", port->name);
		fn = port->ops->ecp_read_data;
		break;

	case IEEE1284_MODE_ECPSWE:
		DPRINTK (KERN_DEBUG "%s: Using software-emulated ECP mode\n",
			 port->name);
		fn = parport_ieee1284_ecp_read_data;
		break;

	default:
		DPRINTK (KERN_DEBUG "%s: Unknown mode 0x%02x\n",
			 port->name, port->physport->ieee1284.mode);
		return -ENOSYS;
	}

	return (*fn) (port, buffer, len, 0);
#endif /* IEEE1284 support */
}
/*
 * stmhdmi_send_data_packet - queue an HDMI data island packet for transmission.
 * @dev: HDMI device state
 * @arg: userspace pointer to a struct stmhdmiio_data_packet
 *
 * Copies the packet description from userspace, wraps it in a metadata
 * object and queues it on the HDMI output.  On successful queueing,
 * ownership of @metadata passes to the display driver, which releases it
 * through the kfree release hook.  Returns 0 or a negative errno.
 */
static int stmhdmi_send_data_packet(struct stm_hdmi *dev, unsigned long arg)
{
	stm_meta_data_result_t res;
	stm_meta_data_t *metadata;
	stm_hdmi_info_frame_t *iframe;
	struct stmhdmiio_data_packet packet;

	if (copy_from_user(&packet,(void*)arg,sizeof(packet)))
		return -EFAULT;

	/* One allocation holds both the metadata header and the info frame;
	 * the whole thing is released in one kfree via the release hook. */
	if((metadata = kzalloc(sizeof(stm_meta_data_t)+sizeof(stm_hdmi_info_frame_t),GFP_KERNEL)) == 0)
		return -ENOMEM;

	metadata->size = sizeof(stm_meta_data_t)+sizeof(stm_hdmi_info_frame_t);
	metadata->release = (void(*)(struct stm_meta_data_s*))kfree;
	/* Convert the user-supplied timeval to 64-bit microseconds. */
	metadata->presentationTime = ((TIME64)packet.timestamp.tv_sec * USEC_PER_SEC) +
				     (TIME64)packet.timestamp.tv_usec;

	switch(packet.type)
	{
	case HDMI_ACP_PACKET_TYPE:
	{
		/*
		 * Don't allow the configuration of ACP packets unless the
		 * connected TV has the supports AI flag set in its EDID.
		 */
		if((dev->edid_info.display_type != STM_DISPLAY_HDMI) ||
		   (dev->edid_info.hdmi_vsdb_flags & STM_HDMI_VSDB_SUPPORTS_AI) == 0)
		{
			DPRINTK("Not Sending ACP Datapacket, sink does not support AI\n");
			kfree(metadata);
			return -EPERM;
		}

		DPRINTK("Sending ACP Datapacket\n");
		metadata->type = STM_METADATA_TYPE_ACP_DATA;
		break;
	}
	case HDMI_VENDOR_INFOFRAME_TYPE:
	{
		DPRINTK("Sending vendor IFrame\n");
		metadata->type = STM_METADATA_TYPE_VENDOR_IFRAME;
		break;
	}
	case HDMI_NTSC_INFOFRAME_TYPE:
	{
		DPRINTK("Sending NTSC IFrame\n");
		metadata->type = STM_METADATA_TYPE_NTSC_IFRAME;
		break;
	}
	case HDMI_GAMUT_DATA_PACKET_TYPE:
	{
		DPRINTK("Sending Color Gamut Datapacket\n");
		metadata->type = STM_METADATA_TYPE_COLOR_GAMUT_DATA;
		break;
	}
	default:
	{
		DPRINTK("Unsupported Datapacket\n");
		kfree(metadata);
		return -EINVAL;
	}
	}

	iframe = (stm_hdmi_info_frame_t*)&metadata->data[0];
	iframe->type = packet.type;
	iframe->version = packet.version;
	iframe->length = packet.length;
	/*
	 * Note: we cannot use packet.length to size the memcpy as this is only
	 * valid for real InfoFrames not arbitrary HDMI data island packets.
	 */
	memcpy(&iframe->data[0],&packet.data[0],28);

	if(stm_display_output_queue_metadata(dev->hdmi_output, metadata, &res)<0)
	{
		/* Queueing failed: we still own the metadata, so free it here. */
		kfree(metadata);
		if(signal_pending(current))
			return -ERESTARTSYS;
		else
			return -EIO;
	}

	return stmhdmi_convert_metadata_result_to_errno(res);
}
/*
 * assign_addrs - detect and address IEEE 1284.3 daisy-chained devices.
 * @port: the parallel port to probe
 *
 * Runs the daisy-chain discovery handshake, assigns consecutive
 * addresses to up to four devices, registers each one, then asks the
 * newly found devices for their device IDs.  Returns the number of
 * devices detected, or 0 on any handshake failure.
 */
static int assign_addrs(struct parport *port)
{
	unsigned char s;
	unsigned char daisy;
	int thisdev = numdevs;
	int detected;
	char *deviceid;

	/* The magic byte sequence aa/55/00/ff announces daisy-chain
	 * discovery; a compliant chain answers with all four status
	 * lines asserted. */
	parport_data_forward(port);
	parport_write_data(port, 0xaa); udelay(2);
	parport_write_data(port, 0x55); udelay(2);
	parport_write_data(port, 0x00); udelay(2);
	parport_write_data(port, 0xff); udelay(2);
	s = parport_read_status(port) & (PARPORT_STATUS_BUSY
					 | PARPORT_STATUS_PAPEROUT
					 | PARPORT_STATUS_SELECT
					 | PARPORT_STATUS_ERROR);
	if (s != (PARPORT_STATUS_BUSY
		  | PARPORT_STATUS_PAPEROUT
		  | PARPORT_STATUS_SELECT
		  | PARPORT_STATUS_ERROR)) {
		DPRINTK(KERN_DEBUG "%s: assign_addrs: aa5500ff(%02x)\n",
			port->name, s);
		return 0;
	}

	parport_write_data(port, 0x87); udelay(2);
	s = parport_read_status(port) & (PARPORT_STATUS_BUSY
					 | PARPORT_STATUS_PAPEROUT
					 | PARPORT_STATUS_SELECT
					 | PARPORT_STATUS_ERROR);
	if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
		DPRINTK(KERN_DEBUG "%s: assign_addrs: aa5500ff87(%02x)\n",
			port->name, s);
		return 0;
	}

	/* 0x78 starts address assignment proper. */
	parport_write_data(port, 0x78); udelay(2);
	s = parport_read_status(port);

	/* Assign addresses 0..3 while devices keep signalling presence. */
	for (daisy = 0;
	     (s & (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT))
		     == (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT)
		     && daisy < 4;
	     ++daisy) {
		parport_write_data(port, daisy);
		udelay(2);
		parport_frob_control(port,
				     PARPORT_CONTROL_STROBE,
				     PARPORT_CONTROL_STROBE);
		udelay(1);
		parport_frob_control(port, PARPORT_CONTROL_STROBE, 0);
		udelay(1);

		add_dev(numdevs++, port, daisy);

		/* See if this device thought it was the last in the
		 * chain. */
		if (!(s & PARPORT_STATUS_BUSY))
			break;

		/* We are seeing pass through status now. We see
		   last_dev from next device or if last_dev does not
		   work status lines from some non-daisy chain
		   device. */
		s = parport_read_status(port);
	}

	parport_write_data(port, 0xff); udelay(2);
	detected = numdevs - thisdev;
	DPRINTK(KERN_DEBUG "%s: Found %d daisy-chained devices\n", port->name,
		detected);

	/* Ask the new devices to introduce themselves. */
	deviceid = kmalloc(1024, GFP_KERNEL);
	if (!deviceid) return 0;

	for (daisy = 0; thisdev < numdevs; thisdev++, daisy++)
		parport_device_id(thisdev, deviceid, 1024);

	kfree(deviceid);
	return detected;
}
/*
 * parport_negotiate - negotiate an IEEE 1284 transfer mode with a peripheral.
 * @port: the parallel port
 * @mode: IEEE1284_MODE_* value (possibly with the address bit set)
 *
 * Performs the IEEE 1284 negotiation handshake (events 0-6, plus the
 * extensibility-link events 51-55 and the ECP setup events 30-31 where
 * applicable).  Returns 0 on success, 1 if the peripheral rejected the
 * mode, -1 if the peripheral is not 1284 compliant, or -ENOSYS for
 * modes not implemented here.
 */
int parport_negotiate (struct parport *port, int mode)
{
#ifndef CONFIG_PARPORT_1284
	if (mode == IEEE1284_MODE_COMPAT)
		return 0;
	printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
	return -1;
#else
	int m = mode & ~IEEE1284_ADDR;
	int r;
	unsigned char xflag;

	/* All negotiation happens on the physical port. */
	port = port->physport;

	/* Is there anything to do? */
	if (port->ieee1284.mode == mode)
		return 0;

	/* Is the difference just an address-or-not bit? */
	if ((port->ieee1284.mode & ~IEEE1284_ADDR) == (mode & ~IEEE1284_ADDR)){
		port->ieee1284.mode = mode;
		return 0;
	}

	/* Go to compability forward idle mode */
	if (port->ieee1284.mode != IEEE1284_MODE_COMPAT)
		parport_ieee1284_terminate (port);

	if (mode == IEEE1284_MODE_COMPAT)
		/* Compatibility mode: no negotiation. */
		return 0;

	/* Software-emulated variants negotiate the underlying mode byte. */
	switch (mode) {
	case IEEE1284_MODE_ECPSWE:
		m = IEEE1284_MODE_ECP;
		break;
	case IEEE1284_MODE_EPPSL:
	case IEEE1284_MODE_EPPSWE:
		m = IEEE1284_MODE_EPP;
		break;
	case IEEE1284_MODE_BECP:
		return -ENOSYS; /* FIXME (implement BECP) */
	}

	if (mode & IEEE1284_EXT_LINK)
		m = 1<<7; /* request extensibility link */

	port->ieee1284.phase = IEEE1284_PH_NEGOTIATION;

	/* Start off with nStrobe and nAutoFd high, and nSelectIn low */
	parport_frob_control (port,
			      PARPORT_CONTROL_STROBE
			      | PARPORT_CONTROL_AUTOFD
			      | PARPORT_CONTROL_SELECT,
			      PARPORT_CONTROL_SELECT);
	udelay(1);

	/* Event 0: Set data */
	parport_data_forward (port);
	parport_write_data (port, m);
	udelay (400); /* Shouldn't need to wait this long. */

	/* Event 1: Set nSelectIn high, nAutoFd low */
	parport_frob_control (port,
			      PARPORT_CONTROL_SELECT
			      | PARPORT_CONTROL_AUTOFD,
			      PARPORT_CONTROL_AUTOFD);

	/* Event 2: PError, Select, nFault go high, nAck goes low */
	if (parport_wait_peripheral (port,
				     PARPORT_STATUS_ERROR
				     | PARPORT_STATUS_SELECT
				     | PARPORT_STATUS_PAPEROUT
				     | PARPORT_STATUS_ACK,
				     PARPORT_STATUS_ERROR
				     | PARPORT_STATUS_SELECT
				     | PARPORT_STATUS_PAPEROUT)) {
		/* Timeout */
		parport_frob_control (port,
				      PARPORT_CONTROL_SELECT
				      | PARPORT_CONTROL_AUTOFD,
				      PARPORT_CONTROL_SELECT);
		DPRINTK (KERN_DEBUG
			 "%s: Peripheral not IEEE1284 compliant (0x%02X)\n",
			 port->name, parport_read_status (port));
		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
		return -1; /* Not IEEE1284 compliant */
	}

	/* Event 3: Set nStrobe low */
	parport_frob_control (port,
			      PARPORT_CONTROL_STROBE,
			      PARPORT_CONTROL_STROBE);

	/* Event 4: Set nStrobe and nAutoFd high */
	udelay (5);
	parport_frob_control (port,
			      PARPORT_CONTROL_STROBE
			      | PARPORT_CONTROL_AUTOFD,
			      0);

	/* Event 6: nAck goes high */
	if (parport_wait_peripheral (port,
				     PARPORT_STATUS_ACK,
				     PARPORT_STATUS_ACK)) {
		/* This shouldn't really happen with a compliant device. */
		DPRINTK (KERN_DEBUG
			 "%s: Mode 0x%02x not supported? (0x%02x)\n",
			 port->name, mode, port->ops->read_status (port));
		parport_ieee1284_terminate (port);
		return 1;
	}

	xflag = parport_read_status (port) & PARPORT_STATUS_SELECT;

	/* xflag should be high for all modes other than nibble (0). */
	if (mode && !xflag) {
		/* Mode not supported. */
		DPRINTK (KERN_DEBUG "%s: Mode 0x%02x rejected by peripheral\n",
			 port->name, mode);
		parport_ieee1284_terminate (port);
		return 1;
	}

	/* More to do if we've requested extensibility link. */
	if (mode & IEEE1284_EXT_LINK) {
		m = mode & 0x7f;
		udelay (1);
		parport_write_data (port, m);
		udelay (1);

		/* Event 51: Set nStrobe low */
		parport_frob_control (port,
				      PARPORT_CONTROL_STROBE,
				      PARPORT_CONTROL_STROBE);

		/* Event 52: nAck goes low */
		if (parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0)) {
			/* This peripheral is _very_ slow. */
			DPRINTK (KERN_DEBUG
				 "%s: Event 52 didn't happen\n",
				 port->name);
			parport_ieee1284_terminate (port);
			return 1;
		}

		/* Event 53: Set nStrobe high */
		parport_frob_control (port,
				      PARPORT_CONTROL_STROBE,
				      0);

		/* Event 55: nAck goes high */
		if (parport_wait_peripheral (port,
					     PARPORT_STATUS_ACK,
					     PARPORT_STATUS_ACK)) {
			/* This shouldn't really happen with a compliant
			 * device. */
			DPRINTK (KERN_DEBUG
				 "%s: Mode 0x%02x not supported? (0x%02x)\n",
				 port->name, mode,
				 port->ops->read_status (port));
			parport_ieee1284_terminate (port);
			return 1;
		}

		/* Event 54: Peripheral sets XFlag to reflect support */
		xflag = parport_read_status (port) & PARPORT_STATUS_SELECT;

		/* xflag should be high. */
		if (!xflag) {
			/* Extended mode not supported. */
			DPRINTK (KERN_DEBUG "%s: Extended mode 0x%02x not "
				 "supported\n", port->name, mode);
			parport_ieee1284_terminate (port);
			return 1;
		}

		/* Any further setup is left to the caller. */
	}

	/* Mode is supported */
	DPRINTK (KERN_DEBUG "%s: In mode 0x%02x\n", port->name, mode);
	port->ieee1284.mode = mode;

	/* But ECP is special */
	if (!(mode & IEEE1284_EXT_LINK) && (m & IEEE1284_MODE_ECP)) {
		port->ieee1284.phase = IEEE1284_PH_ECP_SETUP;

		/* Event 30: Set nAutoFd low */
		parport_frob_control (port,
				      PARPORT_CONTROL_AUTOFD,
				      PARPORT_CONTROL_AUTOFD);

		/* Event 31: PError goes high. */
		r = parport_wait_peripheral (port,
					     PARPORT_STATUS_PAPEROUT,
					     PARPORT_STATUS_PAPEROUT);
		if (r) {
			DPRINTK (KERN_INFO "%s: Timeout at event 31\n",
				 port->name);
		}

		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
		DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
			 port->name);
	} else switch (mode) {
	case IEEE1284_MODE_NIBBLE:
	case IEEE1284_MODE_BYTE:
		port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
		break;
	default:
		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
	}

	return 0;
#endif /* IEEE1284 support */
}
/**
 * pdcspath_hwpath_write - This function handles hardware path modifying.
 * @entry: An allocated and populated pdscpath_entry struct.
 * @buf: The input buffer to read from.
 * @count: The number of bytes to be read.
 *
 * We will call this function to change the current hardware path.
 * Hardware paths are to be given '/'-delimited, without brackets.
 * We make sure that the provided path actually maps to an existing
 * device, BUT nothing would prevent some foolish user to set the path to some
 * PCI bridge or even a CPU...
 * A better work around would be to make sure we are at the end of a device tree
 * for instance, but it would be IMHO beyond the simple scope of that driver.
 * The aim is to provide a facility. Data correctness is left to userland.
 */
static ssize_t
pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t count)
{
	struct hardware_path hwpath;
	unsigned short i;
	char in[64], *temp;
	struct device *dev;
	int ret;

	if (!entry || !buf || !count)
		return -EINVAL;

	/* We'll use a bounded local copy of buf: a fixed-size buffer replaces
	 * the previous unbounded, user-controlled VLA on the kernel stack.
	 * Hardware paths are short, so 63 characters is plenty. */
	count = min_t(size_t, count, sizeof(in)-1);
	strncpy(in, buf, count);
	in[count] = '\0';

	/* Let's clean up the target. 0xff is a blank pattern */
	memset(&hwpath, 0xff, sizeof(hwpath));

	/* First, pick the mod field (the last one of the input string) */
	if (!(temp = strrchr(in, '/')))
		return -EINVAL;

	hwpath.mod = simple_strtoul(temp+1, NULL, 10);
	in[temp-in] = '\0';	/* truncate the remaining string. just precaution */
	DPRINTK("%s: mod: %d\n", __func__, hwpath.mod);

	/* Then, loop for each delimiter, making sure we don't have too many.
	   we write the bc fields in a down-top way. No matter what, we stop
	   before writing the last field. If there are too many fields anyway,
	   then the user is a moron and it'll be caught up later when we'll
	   check the consistency of the given hwpath. */
	for (i=5; ((temp = strrchr(in, '/'))) && (temp-in > 0) && (likely(i)); i--) {
		hwpath.bc[i] = simple_strtoul(temp+1, NULL, 10);
		in[temp-in] = '\0';
		DPRINTK("%s: bc[%d]: %d\n", __func__, i, hwpath.bc[i]);
	}

	/* Store the final field */
	hwpath.bc[i] = simple_strtoul(in, NULL, 10);
	DPRINTK("%s: bc[%d]: %d\n", __func__, i, hwpath.bc[i]);

	/* Now we check that the user isn't trying to lure us */
	if (!(dev = hwpath_to_device((struct hardware_path *)&hwpath))) {
		printk(KERN_WARNING "%s: attempt to set invalid \"%s\" "
			"hardware path: %s\n", __func__, entry->name, buf);
		return -EINVAL;
	}

	/* So far so good, let's get in deep */
	write_lock(&entry->rw_lock);
	entry->ready = 0;
	entry->dev = dev;

	/* Now, dive in. Write back to the hardware */
	pdcspath_store(entry);

	/* Update the symlink to the real device */
	sysfs_remove_link(&entry->kobj, "device");
	ret = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device");
	WARN_ON(ret);

	write_unlock(&entry->rw_lock);

	printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" path to \"%s\"\n",
		entry->name, buf);

	return count;
}
/**
 * pdcs_auto_write - This function handles autoboot/search flag modifying.
 * @buf: The input buffer to read from.
 * @count: The number of bytes to be read.
 * @knob: The PF_AUTOBOOT or PF_AUTOSEARCH flag
 *
 * We will call this function to change the current autoboot flag.
 * We expect a precise syntax:
 *	\"n\" (n == 0 or 1) to toggle AutoBoot Off or On
 */
static ssize_t pdcs_auto_write(struct kobject *kobj,
			       struct kobj_attribute *attr, const char *buf,
			       size_t count, int knob)
{
	struct pdcspath_entry *pathentry;
	unsigned char flags;
	char in[8], *temp;
	char c;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!buf || !count)
		return -EINVAL;

	/* We'll use a bounded local copy of buf: a fixed-size buffer replaces
	 * the previous unbounded, user-controlled VLA on the kernel stack;
	 * the expected syntax is a single digit anyway. */
	count = min_t(size_t, count, sizeof(in)-1);
	strncpy(in, buf, count);
	in[count] = '\0';

	/* Current flags are stored in primary boot path entry */
	pathentry = &pdcspath_entry_primary;

	/* Be nice to the existing flag record */
	read_lock(&pathentry->rw_lock);
	flags = pathentry->devpath.flags;
	read_unlock(&pathentry->rw_lock);

	DPRINTK("%s: flags before: 0x%X\n", __func__, flags);

	temp = skip_spaces(in);

	c = *temp++ - '0';
	if ((c != 0) && (c != 1))
		goto parse_error;
	if (c == 0)
		flags &= ~knob;
	else
		flags |= knob;

	DPRINTK("%s: flags after: 0x%X\n", __func__, flags);

	/* So far so good, let's get in deep */
	write_lock(&pathentry->rw_lock);

	/* Change the path entry flags first */
	pathentry->devpath.flags = flags;

	/* Now, dive in. Write back to the hardware */
	pdcspath_store(pathentry);
	write_unlock(&pathentry->rw_lock);

	printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" to \"%s\"\n",
		(knob & PF_AUTOBOOT) ? "autoboot" : "autosearch",
		(flags & knob) ? "On" : "Off");

	return count;

parse_error:
	printk(KERN_WARNING "%s: Parse error: expect \"n\" (n == 0 or 1)\n", __func__);
	return -EINVAL;
}
/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node. Read it and the mode node, and create a vbd. If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char **vec, unsigned int len)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	char *device_type;

	DPRINTK("");

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this.  Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}

	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	/* The physical device of an existing vbd may not be changed. */
	if ((be->major || be->minor) &&
	    ((be->major != major) || (be->minor != minor))) {
		pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
			be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	/* Only create the vbd on the first successful pass. */
	if (be->major == 0 && be->minor == 0) {
		/* Front end dir is a number, which is used as the handle. */

		char *p = strrchr(dev->otherend, '/') + 1;
		long handle;
		/* NOTE(review): strict_strtoul() takes an unsigned long *;
		 * handle is declared long here — confirm this compiles
		 * without a pointer-type warning. */
		err = strict_strtoul(p, 0, &handle);
		if (err)
			return;

		be->major = major;
		be->minor = minor;

		/* No 'w' in the mode string means the vbd is read-only. */
		err = xen_vbd_create(be->blkif, handle, major, minor,
				     (NULL == strchr(be->mode, 'w')), cdrom);
		if (err) {
			be->major = 0;
			be->minor = 0;
			xenbus_dev_fatal(dev, err, "creating vbd structure");
			return;
		}

		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			be->major = 0;
			be->minor = 0;
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
			return;
		}

		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}
/* * Write the physical details regarding the block device to the store, and * switch to Connected state. */ static void connect(struct backend_info *be) { struct xenbus_transaction xbt; int err; struct xenbus_device *dev = be->dev; DPRINTK("%s", dev->otherend); /* Supply the information about the device the frontend needs */ again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); return; } err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support); if (err) goto abort; err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", (unsigned long long)vbd_sz(&be->blkif->vbd)); if (err) { xenbus_dev_fatal(dev, err, "writing %s/sectors", dev->nodename); goto abort; } /* FIXME: use a typename instead */ err = xenbus_printf(xbt, dev->nodename, "info", "%u", be->blkif->vbd.type | (be->blkif->vbd.readonly ? VDISK_READONLY : 0)); if (err) { xenbus_dev_fatal(dev, err, "writing %s/info", dev->nodename); goto abort; } err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu", (unsigned long) bdev_logical_block_size(be->blkif->vbd.bdev)); if (err) { xenbus_dev_fatal(dev, err, "writing %s/sector-size", dev->nodename); goto abort; } err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; if (err) xenbus_dev_fatal(dev, err, "ending transaction"); err = xenbus_switch_state(dev, XenbusStateConnected); if (err) xenbus_dev_fatal(dev, err, "switching to Connected state", dev->nodename); return; abort: xenbus_transaction_end(xbt, 1); }
/* * Called whenever the USB subsystem thinks we could be the right driver * to handle this device */ static int probe(struct usb_interface *intf, const struct usb_device_id *id) { int alt_set, endp; int found = 0; int i, j; int struct_size; struct usb_host_interface *host_interf; struct usb_interface_descriptor *interf_desc; struct usb_host_endpoint *host_endpoint; struct ttusbir_device *ttusbir; DPRINTK("Module ttusbir probe\n"); /* To reduce memory fragmentation we use only one allocation */ struct_size = sizeof(struct ttusbir_device) + (sizeof(struct urb *) * num_urbs) + (sizeof(char *) * num_urbs) + (num_urbs * 128); ttusbir = kzalloc(struct_size, GFP_KERNEL); if (!ttusbir) return -ENOMEM; ttusbir->urb = (struct urb **)((char *)ttusbir + sizeof(struct ttusbir_device)); ttusbir->buffer = (char **)((char *)ttusbir->urb + (sizeof(struct urb *) * num_urbs)); for (i = 0; i < num_urbs; i++) ttusbir->buffer[i] = (char *)ttusbir->buffer + (sizeof(char *)*num_urbs) + (i * 128); ttusbir->usb_driver = &usb_driver; ttusbir->alt_setting = -1; /* @TODO check if error can be returned */ ttusbir->udev = usb_get_dev(interface_to_usbdev(intf)); ttusbir->interf = intf; ttusbir->last_pulse = 0x00; ttusbir->last_num = 0; /* * Now look for interface setting we can handle * We are searching for the alt setting where end point * 0x82 has max packet size 16 */ for (alt_set = 0; alt_set < intf->num_altsetting && !found; alt_set++) { host_interf = &intf->altsetting[alt_set]; interf_desc = &host_interf->desc; for (endp = 0; endp < interf_desc->bNumEndpoints; endp++) { host_endpoint = &host_interf->endpoint[endp]; if ((host_endpoint->desc.bEndpointAddress == 0x82) && (host_endpoint->desc.wMaxPacketSize == 0x10)) { ttusbir->alt_setting = alt_set; ttusbir->endpoint = endp; found = 1; break; } } } if (ttusbir->alt_setting != -1) DPRINTK("alt setting: %d\n", ttusbir->alt_setting); else { err("Could not find alternate setting\n"); kfree(ttusbir); return -EINVAL; } /* OK lets setup this 
interface setting */ usb_set_interface(ttusbir->udev, 0, ttusbir->alt_setting); /* Store device info in interface structure */ usb_set_intfdata(intf, ttusbir); /* Register as a LIRC driver */ if (lirc_buffer_init(&ttusbir->rbuf, sizeof(lirc_t), 256) < 0) { err("Could not get memory for LIRC data buffer\n"); usb_set_intfdata(intf, NULL); kfree(ttusbir); return -ENOMEM; } strcpy(ttusbir->driver.name, "TTUSBIR"); ttusbir->driver.minor = -1; ttusbir->driver.code_length = 1; ttusbir->driver.sample_rate = 0; ttusbir->driver.data = ttusbir; ttusbir->driver.add_to_buf = NULL; ttusbir->driver.rbuf = &ttusbir->rbuf; ttusbir->driver.set_use_inc = set_use_inc; ttusbir->driver.set_use_dec = set_use_dec; ttusbir->driver.fops = NULL; ttusbir->driver.dev = &intf->dev; ttusbir->driver.owner = THIS_MODULE; ttusbir->driver.features = LIRC_CAN_REC_MODE2; ttusbir->minor = lirc_register_driver(&ttusbir->driver); if (ttusbir->minor < 0) { err("Error registering as LIRC driver\n"); usb_set_intfdata(intf, NULL); lirc_buffer_free(&ttusbir->rbuf); kfree(ttusbir); return -EIO; } /* Allocate and setup the URB that we will use to talk to the device */ for (i = 0; i < num_urbs; i++) { ttusbir->urb[i] = usb_alloc_urb(8, GFP_KERNEL); if (!ttusbir->urb[i]) { err("Could not allocate memory for the URB\n"); for (j = i - 1; j >= 0; j--) kfree(ttusbir->urb[j]); lirc_buffer_free(&ttusbir->rbuf); lirc_unregister_driver(ttusbir->minor); kfree(ttusbir); usb_set_intfdata(intf, NULL); return -ENOMEM; } ttusbir->urb[i]->dev = ttusbir->udev; ttusbir->urb[i]->context = ttusbir; ttusbir->urb[i]->pipe = usb_rcvisocpipe(ttusbir->udev, ttusbir->endpoint); ttusbir->urb[i]->interval = 1; ttusbir->urb[i]->transfer_flags = URB_ISO_ASAP; ttusbir->urb[i]->transfer_buffer = &ttusbir->buffer[i][0]; ttusbir->urb[i]->complete = urb_complete; ttusbir->urb[i]->number_of_packets = 8; ttusbir->urb[i]->transfer_buffer_length = 128; for (j = 0; j < 8; j++) { ttusbir->urb[i]->iso_frame_desc[j].offset = j*16; 
ttusbir->urb[i]->iso_frame_desc[j].length = 16; } } return 0; }
/** * ixgbe_check_options - Range Checking for Command Line Parameters * @adapter: board private structure * * This routine checks all command line parameters for valid user * input. If an invalid value is given, or if no user specified * value exists, a default value is used. The final value is stored * in a variable in the adapter structure. **/ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) { int bd = adapter->bd_number; u32 *aflags = &adapter->flags; struct ixgbe_ring_feature *feature = adapter->ring_feature; if (bd >= IXGBE_MAX_NIC) { printk(KERN_NOTICE "Warning: no configuration for board #%d\n", bd); printk(KERN_NOTICE "Using defaults for all values\n"); #ifndef module_param_array bd = IXGBE_MAX_NIC; #endif } { /* Interrupt Mode */ unsigned int int_mode; static struct ixgbe_option opt = { .type = range_option, .name = "Interrupt Mode", .err = "using default of "__MODULE_STRING(IXGBE_DEFAULT_INT), .def = IXGBE_DEFAULT_INT, .arg = { .r = { .min = IXGBE_INT_LEGACY, .max = IXGBE_INT_MSIX} } }; #ifdef module_param_array if (num_IntMode > bd || num_InterruptType > bd) { #endif int_mode = IntMode[bd]; if (int_mode == OPTION_UNSET) int_mode = InterruptType[bd]; ixgbe_validate_option(&int_mode, &opt); switch (int_mode) { case IXGBE_INT_MSIX: if (!(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) printk(KERN_INFO "Ignoring MSI-X setting; " "support unavailable\n"); break; case IXGBE_INT_MSI: if (!(*aflags & IXGBE_FLAG_MSI_CAPABLE)) { printk(KERN_INFO "Ignoring MSI setting; " "support unavailable\n"); } else { *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; } break; case IXGBE_INT_LEGACY: default: *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; break; } #ifdef module_param_array } else { /* default settings */ if (opt.def == IXGBE_INT_MSIX && *aflags & IXGBE_FLAG_MSIX_CAPABLE) { *aflags |= IXGBE_FLAG_MSIX_CAPABLE; *aflags |= IXGBE_FLAG_MSI_CAPABLE; } else if (opt.def == IXGBE_INT_MSI && *aflags & IXGBE_FLAG_MSI_CAPABLE) { *aflags &= 
~IXGBE_FLAG_MSIX_CAPABLE; *aflags |= IXGBE_FLAG_MSI_CAPABLE; } else { *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; } } #endif } { /* Multiple Queue Support */ static struct ixgbe_option opt = { .type = enable_option, .name = "Multiple Queue Support", .err = "defaulting to Enabled", .def = OPTION_ENABLED }; #ifdef module_param_array if (num_MQ > bd) { #endif unsigned int mq = MQ[bd]; ixgbe_validate_option(&mq, &opt); if (mq) *aflags |= IXGBE_FLAG_MQ_CAPABLE; else *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; #ifdef module_param_array } else { if (opt.def == OPTION_ENABLED) *aflags |= IXGBE_FLAG_MQ_CAPABLE; else *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; } #endif /* Check Interoperability */ if ((*aflags & IXGBE_FLAG_MQ_CAPABLE) && !(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) { DPRINTK(PROBE, INFO, "Multiple queues are not supported while MSI-X " "is disabled. Disabling Multiple Queues.\n"); *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; } } #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) { /* Direct Cache Access (DCA) */ static struct ixgbe_option opt = { .type = range_option, .name = "Direct Cache Access (DCA)", .err = "defaulting to Enabled", .def = IXGBE_MAX_DCA, .arg = { .r = { .min = OPTION_DISABLED, .max = IXGBE_MAX_DCA} } }; unsigned int dca = opt.def; #ifdef module_param_array if (num_DCA > bd) { #endif dca = DCA[bd]; ixgbe_validate_option(&dca, &opt); if (!dca) *aflags &= ~IXGBE_FLAG_DCA_CAPABLE; /* Check Interoperability */ if (!(*aflags & IXGBE_FLAG_DCA_CAPABLE)) { DPRINTK(PROBE, INFO, "DCA is disabled\n"); *aflags &= ~IXGBE_FLAG_DCA_ENABLED; } if (dca == IXGBE_MAX_DCA) { DPRINTK(PROBE, INFO, "DCA enabled for rx data\n"); adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA; } #ifdef module_param_array } else { /* make sure to clear the capability flag if the * option is disabled by default above */ if (opt.def == OPTION_DISABLED) *aflags &= ~IXGBE_FLAG_DCA_CAPABLE; } #endif if (dca == IXGBE_MAX_DCA) adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA; } #endif /* 
CONFIG_DCA or CONFIG_DCA_MODULE */ { /* Receive-Side Scaling (RSS) */ static struct ixgbe_option opt = { .type = range_option, .name = "Receive-Side Scaling (RSS)", .err = "using default.", .def = 0, .arg = { .r = { .min = 0, .max = IXGBE_MAX_RSS_INDICES} } }; unsigned int rss = RSS[bd]; #ifdef module_param_array if (num_RSS > bd) { #endif ixgbe_validate_option(&rss, &opt); /* base it off num_online_cpus() with hardware limit */ if (!rss) rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); else feature[RING_F_FDIR].limit = rss; feature[RING_F_RSS].limit = rss; #ifdef module_param_array } else if (opt.def == 0) {
/*
 * pdcs_auto_write - change the autoboot/autosearch flag in PDC stable storage.
 * @buf: input buffer; expected syntax is a single digit "0" or "1"
 * @count: number of bytes available in @buf
 * @knob: PF_AUTOBOOT or PF_AUTOSEARCH
 *
 * Returns the number of bytes consumed on success, negative errno otherwise.
 */
static ssize_t pdcs_auto_write(struct kobject *kobj,
			       struct kobj_attribute *attr, const char *buf,
			       size_t count, int knob)
{
	struct pdcspath_entry *pathentry;
	unsigned char flags;
	char in[8], *temp;
	char c;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!buf || !count)
		return -EINVAL;

	/* We'll use a bounded local copy of buf: a fixed-size buffer replaces
	 * the previous unbounded, user-controlled VLA on the kernel stack;
	 * the expected syntax is a single digit anyway. */
	count = min_t(size_t, count, sizeof(in)-1);
	strncpy(in, buf, count);
	in[count] = '\0';

	/* Current flags are stored in primary boot path entry. */
	pathentry = &pdcspath_entry_primary;

	/* Snapshot the existing flag record under the read lock. */
	read_lock(&pathentry->rw_lock);
	flags = pathentry->devpath.flags;
	read_unlock(&pathentry->rw_lock);

	DPRINTK("%s: flags before: 0x%X\n", __func__, flags);

	temp = skip_spaces(in);

	c = *temp++ - '0';
	if ((c != 0) && (c != 1))
		goto parse_error;
	if (c == 0)
		flags &= ~knob;
	else
		flags |= knob;

	DPRINTK("%s: flags after: 0x%X\n", __func__, flags);

	/* Update the path entry flags, then write back to the hardware. */
	write_lock(&pathentry->rw_lock);
	pathentry->devpath.flags = flags;
	pdcspath_store(pathentry);
	write_unlock(&pathentry->rw_lock);

	printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" to \"%s\"\n",
		(knob & PF_AUTOBOOT) ? "autoboot" : "autosearch",
		(flags & knob) ? "On" : "Off");

	return count;

parse_error:
	printk(KERN_WARNING "%s: Parse error: expect \"n\" (n == 0 or 1)\n", __func__);
	return -EINVAL;
}
static int xenbus_probe_init(void) { int err = 0; unsigned long page = 0; DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; /* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) printk(KERN_WARNING "XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { struct evtchn_alloc_unbound alloc_unbound; /* Allocate page. */ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOMID_SELF; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else {
static int pdp_demux(void) { int ret; u8 ch; size_t len; struct pdp_info *dev = NULL; struct pdp_hdr hdr; DPRINTK(2, "BEGIN\n"); /* read header */ ret = dpram_read(dpram_filp, &hdr, sizeof(hdr)); if (ret < 0) { EPRINTK("pdp_demux() dpram_read ret : %d\n",ret); return ret; } len = hdr.len - sizeof(struct pdp_hdr); /* check header */ dev = pdp_get_dev(hdr.id); if (dev == NULL) { EPRINTK("invalid id: %u, there is no existing device.\n", hdr.id); ret = -ENODEV; goto err; } /* read data */ switch (dev->type) { case DEV_TYPE_NET: ret = vnet_recv(dev, len,hdr.id); break; case DEV_TYPE_SERIAL: ret = vs_read(dev, len,hdr.id); break; default: ret = -1; } if (ret < 0) { goto err; } /* check stop byte */ ret = dpram_read(dpram_filp, &ch, sizeof(ch)); if (ret < 0 || ch != 0x7e) { return ret; } DPRINTK(2, "END\n"); return 0; err: /* flush the remaining data including stop byte. */ dpram_flush_rx(dpram_filp, len + 1); return ret; }
/*
 * nx_enable_nic - bring up the NIC for @adapter.
 *
 * NOTE: the entire bring-up sequence below is compiled out with "#if 0",
 * so at present this function does nothing and always returns 0.  The
 * locals (version, err, ctx, ring, netdev, flags) exist only for the
 * disabled code and are unused in the live build.
 */
inline int nx_enable_nic(struct unm_adapter_s *adapter)
{
	int version;
	int err;
	int ctx;
	int ring;
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;

#if 0
	/* Disabled bring-up sequence: software init, firmware handshake,
	 * HW resource setup, buffer setup, interrupt enable and port init. */
	initialize_adapter_sw(adapter);	/* initialize the buffers in adapter */

	adapter->ahw.xg_linkup = 0;
	adapter->procCmdBufCounter = 0;
	adapter->lastCmdConsumer = 0;
	adapter->cmdProducer = 0;

	UNM_WRITE_LOCK_IRQS(&adapter->adapter_lock, flags);
	unm_nic_pci_change_crbwindow(adapter, 1);
	UNM_WRITE_UNLOCK_IRQR(&adapter->adapter_lock, flags);

	unm_nic_update_cmd_producer(adapter, 0);
	unm_nic_update_cmd_consumer(adapter, 0);

	/* do this before waking up pegs so that we have valid dummy dma addr*/
	err = initialize_adapter_offload(adapter);

	if (check_hw_init(adapter)!= 0) {
		printk("%s: hw init failed\n",unm_nic_driver_name);
	}

	version = (_UNM_NIC_LINUX_MAJOR << 16) | ((_UNM_NIC_LINUX_MINOR << 8)) |
		  (_UNM_NIC_LINUX_SUBVERSION);
	UNM_NIC_PCI_WRITE_32(version, CRB_NORMALIZE(adapter, CRB_DRIVER_VERSION));

	UNM_NIC_PCI_WRITE_32(1, CRB_NORMALIZE(adapter, UNM_ROMUSB_GLB_PEGTUNE_DONE));

	err = init_firmware (adapter);
	if (err != 0) {
		printk(KERN_ERR "%s: Failed to init firmware\n", unm_nic_driver_name);
		return -EIO;
	}

	err = unm_nic_hw_resources (adapter);
	if (err) {
		DPRINTK(1, ERR, "Error in setting hw resources:" "%d\n", err);
		return err;
	}

	if ((nx_setup_vlan_buffers(adapter)) != 0) {
		unm_nic_free_hw_resources(adapter);
		nx_free_vlan_buffers(adapter);
		return -ENOMEM;
	}

	/*for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
		unm_post_rx_buffers(adapter, ring);
	}*/

	if ((nx_setup_vmkbounce_buffers(adapter)) != 0) {
		nx_free_vmkbounce_buffers(adapter);
		nx_free_vlan_buffers(adapter);
		unm_nic_free_hw_resources(adapter);
		return -ENOMEM;
	}

	read_lock(&adapter->adapter_lock);
	unm_nic_enable_all_int(adapter);
	read_unlock(&adapter->adapter_lock);

	if (unm_nic_macaddr_set (adapter, adapter->mac_addr)!=0) {
		return -EIO;
	}

	if (unm_nic_init_port (adapter) != 0) {
		printk(KERN_ERR "%s: Failed to initialize the port %d\n",
		       unm_nic_driver_name, adapter->portnum);
		return -EIO;
	}

	unm_nic_set_multi(netdev);
	netif_start_queue(netdev);

	adapter->state = PORT_UP;
#endif

	return 0;
}
/*
 * pdp_activate - create and register a virtual PDP device
 * @pdp_arg: in/out: channel id; for serial devices ifname carries the tty
 *           name in, for network devices the created netdev name comes out
 * @type: DEV_TYPE_NET or DEV_TYPE_SERIAL
 * @flags: device flags stored in the new pdp_info
 *
 * Allocates a pdp_info with its tx buffer appended, wires up either a
 * virtual network device or a virtual serial (tty) device, and registers
 * it in the device table.  Returns 0 on success or a negative errno;
 * all partially-created state is torn down on failure.
 *
 * Fix: the result of get_tty_driver_by_id() was dereferenced without a
 * NULL check in the debug print.
 */
static int pdp_activate(pdp_arg_t *pdp_arg, unsigned type, unsigned flags)
{
	int ret;
	struct pdp_info *dev;
	struct net_device *net;

	DPRINTK(2, "BEGIN\n");
	DPRINTK(1, "id: %d\n", pdp_arg->id);

	/* One allocation: control struct followed by the tx packet buffer. */
	dev = kmalloc(sizeof(struct pdp_info) + MAX_PDP_PACKET_LEN, GFP_KERNEL);
	if (dev == NULL) {
		EPRINTK("out of memory\n");
		return -ENOMEM;
	}
	/* Only the control struct needs zeroing, not the packet buffer. */
	memset(dev, 0, sizeof(struct pdp_info));

	/* @LDK@ added by gykim on 20070203 for adjusting IPC 3.0 spec. */
	if (type == DEV_TYPE_NET) {
		dev->id = pdp_arg->id + g_adjust;
	} else {
		dev->id = pdp_arg->id;
	}
	/* @LDK@ added by gykim on 20070203 for adjusting IPC 3.0 spec. */

	dev->type = type;
	dev->flags = flags;
	dev->tx_buf = (u8 *)(dev + 1);	/* buffer sits right after the struct */

	if (type == DEV_TYPE_NET) {
		net = vnet_add_dev((void *)dev);
		if (net == NULL) {
			kfree(dev);
			return -ENOMEM;
		}

		dev->vn_dev.net = net;
		/* Report the kernel-assigned interface name back to the caller. */
		strcpy(pdp_arg->ifname, net->name);

		ret = pdp_add_dev(dev);
		if (ret < 0) {
			EPRINTK("pdp_add_dev() failed\n");
			vnet_del_dev(dev->vn_dev.net);
			kfree(dev);
			return ret;
		}

		DPRINTK(1, "%s(id: %u) network device created\n", net->name, dev->id);
	} else if (type == DEV_TYPE_SERIAL) {
		init_MUTEX(&dev->vs_dev.write_lock);
		strcpy(dev->vs_dev.tty_name, pdp_arg->ifname);

		ret = vs_add_dev(dev);
		if (ret < 0) {
			kfree(dev);
			return ret;
		}

		ret = pdp_add_dev(dev);
		if (ret < 0) {
			EPRINTK("pdp_add_dev() failed\n");
			vs_del_dev(dev);
			kfree(dev);
			return ret;
		}

		{
			struct tty_driver *tty_driver = get_tty_driver_by_id(dev);

			/* FIX: guard against a NULL lookup before dereferencing. */
			if (tty_driver)
				DPRINTK(1, "%s(id: %u) serial device is created.\n",
					tty_driver->name, dev->id);
		}
	}

	DPRINTK(2, "END\n");
	return 0;
}
/* Terminate a negotiated mode. */
/*
 * Drops @port out of whatever IEEE 1284 mode it negotiated and returns it
 * to compatibility (forward idle) mode, issuing the per-mode termination
 * event sequence.  Peripheral timeouts are logged but do not abort the
 * sequence.  Note the deliberate fall-through from the ECP cases into the
 * default termination handshake.
 */
static void parport_ieee1284_terminate (struct parport *port)
{
	int r;
	port = port->physport;	/* always operate on the physical port */

	/* EPP terminates differently. */
	switch (port->ieee1284.mode) {
	case IEEE1284_MODE_EPP:
	case IEEE1284_MODE_EPPSL:
	case IEEE1284_MODE_EPPSWE:
		/* Terminate from EPP mode. */

		/* Event 68: Set nInit low */
		parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
		udelay (50);

		/* Event 69: Set nInit high, nSelectIn low */
		parport_frob_control (port,
				      PARPORT_CONTROL_SELECT
				      | PARPORT_CONTROL_INIT,
				      PARPORT_CONTROL_SELECT
				      | PARPORT_CONTROL_INIT);
		break;

	case IEEE1284_MODE_ECP:
	case IEEE1284_MODE_ECPRLE:
	case IEEE1284_MODE_ECPSWE:
		/* In ECP we can only terminate from fwd idle phase. */
		if (port->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
			/* Event 47: Set nInit high */
			parport_frob_control (port,
					      PARPORT_CONTROL_INIT
					      | PARPORT_CONTROL_AUTOFD,
					      PARPORT_CONTROL_INIT
					      | PARPORT_CONTROL_AUTOFD);

			/* Event 49: PError goes high */
			r = parport_wait_peripheral (port,
						     PARPORT_STATUS_PAPEROUT,
						     PARPORT_STATUS_PAPEROUT);
			if (r)
				DPRINTK (KERN_INFO "%s: Timeout at event 49\n",
					 port->name);

			parport_data_forward (port);
			DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
				 port->name);
			port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
		}

		/* fall-through.. */

	default:
		/* Terminate from all other modes. */

		/* Event 22: Set nSelectIn low, nAutoFd high */
		parport_frob_control (port,
				      PARPORT_CONTROL_SELECT
				      | PARPORT_CONTROL_AUTOFD,
				      PARPORT_CONTROL_SELECT);

		/* Event 24: nAck goes low */
		r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0);
		if (r)
			DPRINTK (KERN_INFO "%s: Timeout at event 24\n",
				 port->name);

		/* Event 25: Set nAutoFd low */
		parport_frob_control (port,
				      PARPORT_CONTROL_AUTOFD,
				      PARPORT_CONTROL_AUTOFD);

		/* Event 27: nAck goes high */
		r = parport_wait_peripheral (port, PARPORT_STATUS_ACK,
					     PARPORT_STATUS_ACK);
		if (r)
			DPRINTK (KERN_INFO "%s: Timeout at event 27\n",
				 port->name);

		/* Event 29: Set nAutoFd high */
		parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
	}

	/* We are now back in the (forward idle) compatibility state. */
	port->ieee1284.mode = IEEE1284_MODE_COMPAT;
	port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	DPRINTK (KERN_DEBUG "%s: In compatibility (forward idle) mode\n",
		 port->name);
}
/* Close a DPRAM device file on behalf of the current process. */
static inline void dpram_close(struct file *file)
{
	DPRINTK(2, "BEGIN\n");
	filp_close(file, current->files);
	DPRINTK(2, "END\n");
}
/**
 * parport_write - write a block of data to a parallel port
 * @port: port to write to
 * @buffer: data to send
 * @len: number of bytes to send
 *
 * Dispatches to the transfer routine appropriate for the currently
 * negotiated IEEE 1284 mode (or the compatibility routine when 1284
 * support is compiled out).  Returns the number of bytes written, or a
 * negative errno.
 *
 * Fixes: the final debug print used %d for a ssize_t and a size_t
 * (format/argument mismatch); the intentional fall-through from the
 * reverse-only nibble/byte cases is now annotated.
 */
ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
{
#ifndef CONFIG_PARPORT_1284
	return port->ops->compat_write_data (port, buffer, len, 0);
#else
	ssize_t retval;
	int mode = port->ieee1284.mode;
	int addr = mode & IEEE1284_ADDR;
	size_t (*fn) (struct parport *, const void *, size_t, int);

	/* Ignore the device-ID-request bit and the address bit. */
	mode &= ~(IEEE1284_DEVICEID | IEEE1284_ADDR);

	/* Use the mode we're in. */
	switch (mode) {
	case IEEE1284_MODE_NIBBLE:
	case IEEE1284_MODE_BYTE:
		/* Reverse-only modes: renegotiate to compatibility mode
		 * for writing, then use its transfer routine. */
		parport_negotiate (port, IEEE1284_MODE_COMPAT);
		/* fall through */
	case IEEE1284_MODE_COMPAT:
		DPRINTK (KERN_DEBUG "%s: Using compatibility mode\n",
			 port->name);
		fn = port->ops->compat_write_data;
		break;

	case IEEE1284_MODE_EPP:
		DPRINTK (KERN_DEBUG "%s: Using EPP mode\n", port->name);
		if (addr) {
			fn = port->ops->epp_write_addr;
		} else {
			fn = port->ops->epp_write_data;
		}
		break;
	case IEEE1284_MODE_EPPSWE:
		DPRINTK (KERN_DEBUG "%s: Using software-emulated EPP mode\n",
			port->name);
		if (addr) {
			fn = parport_ieee1284_epp_write_addr;
		} else {
			fn = parport_ieee1284_epp_write_data;
		}
		break;
	case IEEE1284_MODE_ECP:
	case IEEE1284_MODE_ECPRLE:
		DPRINTK (KERN_DEBUG "%s: Using ECP mode\n", port->name);
		if (addr) {
			fn = port->ops->ecp_write_addr;
		} else {
			fn = port->ops->ecp_write_data;
		}
		break;

	case IEEE1284_MODE_ECPSWE:
		DPRINTK (KERN_DEBUG "%s: Using software-emulated ECP mode\n",
			 port->name);
		/* The caller has specified that it must be emulated,
		 * even if we have ECP hardware! */
		if (addr) {
			fn = parport_ieee1284_ecp_write_addr;
		} else {
			fn = parport_ieee1284_ecp_write_data;
		}
		break;

	default:
		DPRINTK (KERN_DEBUG "%s: Unknown mode 0x%02x\n", port->name,
			 port->ieee1284.mode);
		return -ENOSYS;
	}

	retval = (*fn) (port, buffer, len, 0);
	/* FIX: %zd/%zu match ssize_t/size_t (was %d/%d). */
	DPRINTK (KERN_DEBUG "%s: wrote %zd/%zu bytes\n", port->name, retval, len);

	return retval;
#endif /* IEEE1284 support */
}
static int dpram_thread(void *data) { int ret = 0; unsigned long flag; struct file *filp; struct sched_param schedpar; DPRINTK(2, "BEGIN\n"); dpram_task = current; daemonize("dpram_thread"); strcpy(current->comm, "multipdp"); schedpar.sched_priority = 1; sched_setscheduler(current, SCHED_FIFO, &schedpar); /* set signals to accept */ siginitsetinv(¤t->blocked, sigmask(SIGUSR1)); recalc_sigpending(); filp = dpram_open(); if (filp == NULL) { goto out; } dpram_filp = filp; /* send start signal */ complete(&dpram_complete); while (1) { ret = dpram_poll(filp); if (ret == -ERESTARTSYS) { if (sigismember(¤t->pending.signal, SIGUSR1)) { sigdelset(¤t->pending.signal, SIGUSR1); recalc_sigpending(); ret = 0; break; } } else if (ret < 0) { EPRINTK("dpram_poll() failed\n"); break; } else { char ch; ret = dpram_read(dpram_filp, &ch, sizeof(ch)); if(ret < 0) { return ret; } if (ch == 0x7f) { pdp_demux(); } } } dpram_close(filp); dpram_filp = NULL; out: dpram_task = NULL; /* send finish signal and exit */ complete_and_exit(&dpram_complete, ret); DPRINTK(2, "END\n"); }
/*
 * stmhdmi_create - create and register the HDMI output device @id
 * @id: HDMI instance number (only 0 honours the "hdmi0" module parameter)
 * @firstdevice: base dev_t for device node registration
 * @platform_data: core display pipeline description; on success its
 *                 hdmi_data field is set to the new device
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * created state is released via stmhdmi_destroy().
 *
 * Fixes: kthread_run() returns ERR_PTR() on failure (never NULL), so the
 * old "== NULL" test could never fire; and a failed vsync-callback
 * registration leaked the hdmi structure (every other error path calls
 * stmhdmi_destroy()).
 */
int __init stmhdmi_create(int id, dev_t firstdevice, struct stmcore_display_pipeline_data *platform_data)
{
  struct stm_hdmi *hdmi;
  struct i2c_adapter *i2c;
  char *paramstring;

  if(!platform_data)
  {
    printk(KERN_ERR "platform data pointer is NULL\n");
    BUG();
    return -EINVAL;
  }

  i2c = i2c_get_adapter(platform_data->hdmi_i2c_adapter_id);

  if(!i2c)
  {
    printk(KERN_ERR "HDMI i2c bus (%d) not available, check your kernel configuration and board setup\n",platform_data->hdmi_i2c_adapter_id);
    return -EINVAL;
  }

  platform_data->hdmi_data = NULL;

  if((hdmi = stmhdmi_create_hdmi_dev_struct()) == NULL)
    return -ENOMEM;

  DPRINTK("new hdmi structure = %p\n",hdmi);

#ifdef __TDT__
  //Dagobert
  HACK_dev = hdmi;
#endif

  /*
   * Note that we reuse the device handle from the platform data.
   */
  hdmi->device      = platform_data->device;
  hdmi->irq         = -1;
  hdmi->i2c_adapter = i2c;
  hdmi->video_type  = STM_VIDEO_OUT_RGB;

  /* Only instance 0 has a module-parameter string. */
  switch(id)
  {
    case 0:
      paramstring = hdmi0;
      break;
    default:
      paramstring = NULL;
      break;
  }

  if(paramstring)
  {
    if(paramstring[0] == 'd' || paramstring[0] == 'D')
    {
      printk(KERN_WARNING "hdmi%d.0 is initially disabled, use 'stfbset -e hdmi' to enable it\n",id);
      hdmi->disable = 1;
    }
  }

  /*
   * Set the default CEA selection behaviour to use the aspect ratio in the EDID
   */
  hdmi->cea_mode_from_edid = 1;

#ifdef __TDT__
  hdmi->non_strict_edid_semantics = STMHDMIIO_EDID_NON_STRICT_MODE_HANDLING;
#endif

  /*
   * Copy the display runtime pointer for the vsync callback handling.
   */
  hdmi->display_runtime = platform_data->display_runtime;

  /*
   * Note that we are trusting the output identifiers are valid
   * and pointing to correct output types.
   */
  hdmi->main_output = stm_display_get_output(hdmi->device, platform_data->main_output_id);
  hdmi->hdmi_output = stm_display_get_output(hdmi->device, platform_data->hdmi_output_id);

  if(hdmi->main_output == NULL || hdmi->hdmi_output == NULL)
  {
    DPRINTK("Cannot get display outputs main = %d, hdmi = %d\n",platform_data->main_output_id,platform_data->hdmi_output_id);
    stmhdmi_destroy(hdmi);
    return -ENODEV;
  }

  if(stm_display_output_get_capabilities(hdmi->hdmi_output, &hdmi->capabilities)<0)
  {
    DPRINTK("Cannot get hdmi output capabilities\n");
    stmhdmi_destroy(hdmi);
    return -ENODEV;
  }

  if(!(hdmi->capabilities & STM_OUTPUT_CAPS_TMDS))
  {
    printk(KERN_ERR "Provided HDMI output identifier doesn't support TMDS??\n");
    stmhdmi_destroy(hdmi);
    return -ENODEV;
  }

  if(request_irq(platform_data->hdmi_irq, stmhdmi_interrupt, IRQF_DISABLED, "hdmi", hdmi->hdmi_output))
  {
    printk(KERN_ERR "Cannot get HDMI irq = %d\n",platform_data->hdmi_irq);
    stmhdmi_destroy(hdmi);
    return -ENODEV;
  }

  hdmi->irq = platform_data->hdmi_irq;

  if(stmhdmi_create_spd_metadata(hdmi))
  {
    stmhdmi_destroy(hdmi);
    return -ENOMEM;
  }

  /*
   * If we split the HDMI management into another module then we should change
   * the owner field in the callback info to THIS_MODULE. However this is
   * linked into the coredisplay module at the moment we do not want to have
   * another reference to ourselves.
   */
  INIT_LIST_HEAD(&(hdmi->vsync_cb_info.node));
  hdmi->vsync_cb_info.owner   = NULL;
  hdmi->vsync_cb_info.context = hdmi;
  hdmi->vsync_cb_info.cb      = stmhdmi_vsync_cb;
  if(stmcore_register_vsync_callback(hdmi->display_runtime, &hdmi->vsync_cb_info)<0)
  {
    printk(KERN_ERR "Cannot register hdmi vsync callback\n");
    /* FIX: this failure path used to leak the hdmi structure. */
    stmhdmi_destroy(hdmi);
    return -ENODEV;
  }

  hdmi->thread = kthread_run(stmhdmi_manager,hdmi,"hdmid/%d",id);
  /* FIX: kthread_run() reports failure via ERR_PTR(), never NULL. */
  if (IS_ERR(hdmi->thread))
  {
    printk(KERN_ERR "Cannot start hdmi thread id = %d\n",id);
    hdmi->thread = NULL; /* keep cleanup from touching the ERR_PTR value */
    stmhdmi_destroy(hdmi);
    return -ENOMEM;
  }

  platform_data->hdmi_data = hdmi;

  if(stmhdmi_register_device(hdmi, id, firstdevice, platform_data))
  {
    stmhdmi_destroy(hdmi);
    return -ENODEV;
  }

  return 0;
}
/* tty write_room hook: always report a fixed 16 KiB of buffer space. */
static int vs_write_room(struct tty_struct *tty)
{
	DPRINTK(2, "BEGIN\n");
	DPRINTK(2, "END\n");
	return 2 * 8192;	/* 16384 bytes, independent of actual state */
}
/*
 * stmhdmi_set_isrc_data - queue ISRC1/ISRC2 HDMI data-island packets
 * @dev: HDMI device state
 * @arg: userspace pointer to a struct stmhdmiio_isrc_data
 *
 * Copies the request from userspace, refuses it unless the sink's EDID
 * advertises the "supports AI" flag, builds the metadata packet(s) and
 * queues them on the HDMI output at the requested presentation time.
 * Returns the converted queue result on success, or a negative errno.
 */
static int stmhdmi_set_isrc_data(struct stm_hdmi *dev, unsigned long arg)
{
  stm_meta_data_result_t res;
  stm_meta_data_t *metadata;
  stm_hdmi_isrc_data_t *isrc;
  struct stmhdmiio_isrc_data isrcdata;

  if (copy_from_user(&isrcdata,(void*)arg,sizeof(isrcdata)))
    return -EFAULT;

  /*
   * Don't allow the configuration of ISRC packets unless the
   * connected TV has the supports AI flag set in its EDID.
   */
  if((dev->edid_info.display_type != STM_DISPLAY_HDMI) ||
     (dev->edid_info.hdmi_vsdb_flags & STM_HDMI_VSDB_SUPPORTS_AI) == 0)
  {
    DPRINTK("Not Sending ISRC Datapackets, sink does not support AI\n");
    return -EPERM;
  }

  /* kzalloc matters: the packet payload starts out all-zero (see below). */
  if((metadata = kzalloc(sizeof(stm_meta_data_t)+sizeof(stm_hdmi_isrc_data_t),GFP_KERNEL)) == 0)
    return -ENOMEM;

  metadata->size    = sizeof(stm_meta_data_t)+sizeof(stm_hdmi_isrc_data_t);
  metadata->release = (void(*)(struct stm_meta_data_s*))kfree;
  metadata->type    = STM_METADATA_TYPE_ISRC_DATA;
  /* Presentation time converted to a 64-bit microsecond value. */
  metadata->presentationTime = ((TIME64)isrcdata.timestamp.tv_sec * USEC_PER_SEC) +
                               (TIME64)isrcdata.timestamp.tv_usec;

  isrc = (stm_hdmi_isrc_data_t*)&metadata->data[0];
  isrc->isrc1.type = HDMI_ISRC1_PACKET_TYPE;
  isrc->isrc2.type = HDMI_ISRC2_PACKET_TYPE;

  /* A DISABLE request leaves both packets blank (zeroed) apart from types. */
  if(isrcdata.status != ISRC_STATUS_DISABLE)
  {
    int i;
    isrc->isrc1.version = (isrcdata.status & HDMI_ISRC1_STATUS_MASK) | HDMI_ISRC1_VALID;

    /*
     * Just copy the first 16 bytes of information to ISRC1
     */
    memcpy(isrc->isrc1.data,&isrcdata.upc_ean_isrc[0],16);

    /*
     * For the second 16 bytes we need to see if there is any non-zero data in
     * there. If not then the second ISRC packet will not be transmitted.
     */
    for(i=16;i<32;i++)
    {
      /* Only non-zero bytes are copied; intervening zero bytes are
       * already zero courtesy of kzalloc above. */
      if(isrcdata.upc_ean_isrc[i] != 0)
      {
        isrc->isrc1.version |= HDMI_ISRC1_CONTINUED;
        isrc->isrc2.data[i-16] = isrcdata.upc_ean_isrc[i];
      }
    }
  }

  DPRINTK("Sending ISRC Datapackets\n");

  if(stm_display_output_queue_metadata(dev->hdmi_output, metadata, &res)<0)
  {
    /* Queueing failed before ownership transferred: free it ourselves. */
    kfree(metadata);
    if(signal_pending(current))
      return -ERESTARTSYS;
    else
      return -EIO;
  }

  return stmhdmi_convert_metadata_result_to_errno(res);
}
/* tty chars_in_buffer hook: nothing is ever buffered, so report zero. */
static int vs_chars_in_buffer(struct tty_struct *tty)
{
	DPRINTK(2, "BEGIN\n");
	DPRINTK(2, "END\n");
	return 0;
}
/*
 * ncp_file_write - write data to a file on a NetWare server
 * @file: open file (O_APPEND honoured)
 * @buf: userspace source buffer
 * @count: number of bytes requested
 * @ppos: file position, updated on return
 *
 * Data is staged through a vmalloc'd bounce buffer and sent in
 * server-buffer-sized chunks.  Enforces the MAX_NON_LFS limit for
 * non-O_LARGEFILE opens and the superblock s_maxbytes limit, raising
 * SIGXFSZ where the limit is exceeded.  Returns the number of bytes
 * written, or a negative errno if nothing was written.
 */
static ssize_t
ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;
	size_t already_written = 0;
	off_t pos;
	size_t bufsize;
	int errno;
	void* bouncebuffer;

	DPRINTK("ncp_file_write: enter %s/%s\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);
	if (!ncp_conn_valid(NCP_SERVER(inode)))
		return -EIO;
	if (!S_ISREG(inode->i_mode)) {
		DPRINTK("ncp_file_write: write to non-file, mode %07o\n",
			inode->i_mode);
		return -EINVAL;
	}
	/* Reject counts so large they'd be negative as a ssize_t. */
	if ((ssize_t) count < 0)
		return -EINVAL;
	pos = *ppos;
	if (file->f_flags & O_APPEND) {
		pos = inode->i_size;
	}

	/* Enforce the 31-bit file size limit for non-largefile opens. */
	if (pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) {
		if (pos >= MAX_NON_LFS) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		if (count > MAX_NON_LFS - (u32)pos) {
			count = MAX_NON_LFS - (u32)pos;
		}
	}
	/* Clamp against the filesystem's own maximum file size. */
	if (pos >= inode->i_sb->s_maxbytes) {
		if (count || pos > inode->i_sb->s_maxbytes) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
	}
	if (pos + count > inode->i_sb->s_maxbytes) {
		count = inode->i_sb->s_maxbytes - pos;
	}

	if (!count)
		return 0;
	/* Make sure the remote file is open for writing (refcounted). */
	errno = ncp_make_open(inode, O_WRONLY);
	if (errno) {
		DPRINTK(KERN_ERR "ncp_file_write: open failed, error=%d\n", errno);
		return errno;
	}
	bufsize = NCP_SERVER(inode)->buffer_size;

	already_written = 0;

	bouncebuffer = vmalloc(bufsize);
	if (!bouncebuffer) {
		errno = -EIO;	/* -ENOMEM */
		goto outrel;
	}
	/* Send in chunks aligned to the server's buffer size. */
	while (already_written < count) {
		int written_this_time;
		size_t to_write = min_t(unsigned int,
				      bufsize - (pos % bufsize),
				      count - already_written);

		if (copy_from_user(bouncebuffer, buf, to_write)) {
			errno = -EFAULT;
			break;
		}
		if (ncp_write_kernel(NCP_SERVER(inode),
		    NCP_FINFO(inode)->file_handle,
		    pos, to_write, bouncebuffer, &written_this_time) != 0) {
			errno = -EIO;	/* NW errno -> Linux errno */
			break;
		}
		pos += written_this_time;
		buf += written_this_time;
		already_written += written_this_time;

		/* Short write: server accepted less than requested, stop. */
		if (written_this_time != to_write) {
			break;
		}
	}
	vfree(bouncebuffer);

	inode->i_mtime = inode->i_atime = CURRENT_TIME;

	*ppos = pos;

	/* Extend the cached inode size if we wrote past EOF. */
	if (pos > inode->i_size) {
		inode->i_size = pos;
	}
	DPRINTK("ncp_file_write: exit %s/%s\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);
outrel:
	/* Drop the open reference taken by ncp_make_open(). */
	ncp_inode_close(inode);
	/* Partial success wins over a deferred error. */
	return already_written ? already_written : errno;
}
/*
 * vs_read - pull @len bytes for a virtual-serial channel out of DPRAM
 * and push them into the channel's tty flip buffer.
 * @dev: target virtual-serial device (may be NULL: data silently dropped)
 * @len: payload length announced in the frame header
 * @vs_id: channel id (unused here)
 *
 * Oversized payloads (> MAX_PDP_DATA_LEN, e.g. RF cal data) are drained
 * through the large prx_buf in MAX_PDP_DATA_LEN slices; normal payloads
 * go through pdp_rx_buf in one read.  Data for channels whose tty side
 * is not open (CSD/GPS/EFS/SMD checks) is read and discarded.
 * Returns 0 on success, -1 on flip-buffer overflow, or a negative/short
 * read result from dpram_read().
 *
 * Fix: dpram_read() returns a negative errno on failure, but its result
 * was stored in unsigned (u32) locals, so an error became a huge
 * positive byte count and corrupted the drain loop.  Locals are now
 * signed and errors are propagated.
 */
static int vs_read(struct pdp_info *dev, size_t len, int vs_id)
{
	int retval = 0;
	int size;		/* signed: dpram_read() may return -errno */
	int copied_size;
	int insert_size = 0;

	DPRINTK(2, "BEGIN\n");

	if (dev) {
		/* pdp data length. */
		if (len > MAX_PDP_DATA_LEN) {	// RF cal data?
			DPRINTK(1, "CAL DATA\n");
			size = dpram_read(dpram_filp, prx_buf, len);
			/* FIX: propagate read errors instead of looping on them. */
			if (size < 0)
				return size;
			DPRINTK(1, "multipdp_thread request read size : %d readed size %d, count : %d\n",len ,size,count);

			/* Channels without an open tty reader: discard. */
			if ((dev->id == 1 && !fp_vsCSD) || (dev->id == 5 && !fp_vsGPS) ||
			    (dev->id == 8 && !fp_vsEFS) || (dev->id == 25 && !fp_vsSMD)) {
				EPRINTK("vs_read : %s, discard data.\n", dev->vs_dev.tty->name);
			} else {
				/* Feed the tty in MAX_PDP_DATA_LEN slices. */
				while (size) {
					copied_size = (size > MAX_PDP_DATA_LEN) ? MAX_PDP_DATA_LEN : size;
					if (size > 0 && dev->vs_dev.tty != NULL)
						insert_size = tty_insert_flip_string(dev->vs_dev.tty,
										     prx_buf + retval, copied_size);
					if (insert_size != copied_size) {
						EPRINTK("flip buffer full : %s, insert size : %d, real size : %d\n",dev->vs_dev.tty->name,copied_size,insert_size);
						return -1;
					}
					size = size - copied_size;
					retval += copied_size;
				}
				DPRINTK(1, "retval : %d\n",retval);
				tty_flip_buffer_push(dev->vs_dev.tty);
				count++;
			}
		} else {
			retval = dpram_read(dpram_filp, pdp_rx_buf, len);
			/* Error or short read: hand the result back to the caller. */
			if (retval != len)
				return retval;

			if (retval > 0) {
				if ((dev->id == 1 && !fp_vsCSD) || (dev->id == 5 && !fp_vsGPS) ||
				    (dev->id == 8 && !fp_vsEFS) || (dev->id == 25 && !fp_vsSMD)) {
					EPRINTK("vs_read : %s, discard data.\n", dev->vs_dev.tty->name);
				} else {
					insert_size = tty_insert_flip_string(dev->vs_dev.tty, pdp_rx_buf, retval);
					if (insert_size != retval) {
						EPRINTK("flip buffer full : %s, insert size : %d, real size : %d\n",dev->vs_dev.tty->name,retval,insert_size);
						return -1;
					}
					tty_flip_buffer_push(dev->vs_dev.tty);
				}
			}
		}
	}
	DPRINTK(2, "END\n");
	return 0;
}
/*
 * Open a file with the specified read/write mode.
 */
/*
 * ncp_make_open - ensure the remote file behind @inode is open with at
 * least @right (O_RDONLY/O_WRONLY) access, taking an open reference.
 *
 * Tries O_RDWR first so a single server handle can satisfy later readers
 * and writers; falls back to exactly the requested access.  On success
 * the opened refcount is incremented and 0 is returned; the caller must
 * balance with ncp_inode_close().  Returns -EINVAL for a NULL inode or
 * -EACCES if the granted access does not cover @right.
 */
int ncp_make_open(struct inode *inode, int right)
{
	int error;
	int access;

	error = -EINVAL;
	if (!inode) {
		printk(KERN_ERR "ncp_make_open: got NULL inode\n");
		goto out;
	}

	DPRINTK("ncp_make_open: opened=%d, volume # %u, dir entry # %u\n",
		atomic_read(&NCP_FINFO(inode)->opened),
		NCP_FINFO(inode)->volNumber,
		NCP_FINFO(inode)->dirEntNum);
	error = -EACCES;
	/* Serialise against other openers of this inode. */
	down(&NCP_FINFO(inode)->open_sem);
	if (!atomic_read(&NCP_FINFO(inode)->opened)) {
		struct ncp_entry_info finfo;
		int result;

		/* tries max. rights */
		finfo.access = O_RDWR;
		result = ncp_open_create_file_or_subdir(NCP_SERVER(inode),
					inode, NULL, OC_MODE_OPEN,
					0, AR_READ | AR_WRITE, &finfo);
		if (!result)
			goto update;
		/* RDWR did not succeeded, try readonly or writeonly as requested */
		switch (right) {
			case O_RDONLY:
				finfo.access = O_RDONLY;
				result = ncp_open_create_file_or_subdir(NCP_SERVER(inode),
					inode, NULL, OC_MODE_OPEN,
					0, AR_READ, &finfo);
				break;
			case O_WRONLY:
				finfo.access = O_WRONLY;
				result = ncp_open_create_file_or_subdir(NCP_SERVER(inode),
					inode, NULL, OC_MODE_OPEN,
					0, AR_WRITE, &finfo);
				break;
		}
		if (result) {
			PPRINTK("ncp_make_open: failed, result=%d\n", result);
			goto out_unlock;
		}
		/*
		 * Update the inode information.
		 */
	update:
		ncp_update_inode(inode, &finfo);
		atomic_set(&NCP_FINFO(inode)->opened, 1);
	}

	/* Check the access we actually got against what was asked for. */
	access = NCP_FINFO(inode)->access;
	PPRINTK("ncp_make_open: file open, access=%x\n", access);
	if (access == right || access == O_RDWR) {
		atomic_inc(&NCP_FINFO(inode)->opened);
		error = 0;
	}

out_unlock:
	up(&NCP_FINFO(inode)->open_sem);
out:
	return error;
}
/*
 * netfront_accel_msg_from_bend - workqueue handler draining messages
 * from the backend (dom0) side of the accelerated netfront vnic.
 *
 * NOTE(review): the matching "#if" for the "#endif" below (selecting the
 * signature by kernel version) lies above this chunk; the body is left
 * byte-identical.
 *
 * Under vnic_mutex: acknowledges backend flag bits (queue-not-full,
 * queue0-full, net up/down), drains and dispatches all queued messages,
 * then issues any pending buffer-map requests.  Errors close the vnic.
 */
void netfront_accel_msg_from_bend(void *context)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
	netfront_accel_vnic *vnic =
		container_of(context, netfront_accel_vnic, msg_from_bend);
#else
	netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
#endif
	struct net_accel_msg msg;
	int err, queue_was_full = 0;

	mutex_lock(&vnic->vnic_mutex);

	/*
	 * This happens when the shared pages have been unmapped but
	 * the workqueue has yet to be flushed
	 */
	if (!vnic->dom0_state_is_setup)
		goto unlock_out;

	/* Process and acknowledge every flag bit the backend has raised. */
	while ((vnic->shared_page->aflags & NET_ACCEL_MSG_AFLAGS_TO_DOMU_MASK)
	       != 0) {
		if (vnic->shared_page->aflags &
		    NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL) {
			/* We've been told there may now be space. */
			clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL_B,
				  (unsigned long *)&vnic->shared_page->aflags);
		}

		if (vnic->shared_page->aflags &
		    NET_ACCEL_MSG_AFLAGS_QUEUE0FULL) {
			/*
			 * There will be space at the end of this
			 * function if we can make any.
			 */
			clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B,
				  (unsigned long *)&vnic->shared_page->aflags);
			queue_was_full = 1;
		}

		if (vnic->shared_page->aflags &
		    NET_ACCEL_MSG_AFLAGS_NETUPDOWN) {
			DPRINTK("%s: net interface change\n", __FUNCTION__);
			clear_bit(NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B,
				  (unsigned long *)&vnic->shared_page->aflags);
			if (vnic->shared_page->net_dev_up)
				netfront_accel_interface_up(vnic);
			else
				netfront_accel_interface_down(vnic);
		}
	}

	/* Pull msg out of shared memory */
	while ((err = net_accel_msg_recv(vnic->shared_page, &vnic->from_dom0,
					 &msg)) == 0) {
		err = vnic_process_rx_msg(vnic, &msg);
		if (err != 0)
			goto done;
	}

	/*
	 * Send any pending buffer map request messages that we can,
	 * and mark domU->dom0 as full if necessary.
	 */
	if (vnic->msg_state == NETFRONT_ACCEL_MSG_HW &&
	    vnic->bufpages.page_reqs < vnic->bufpages.max_pages) {
		if (vnic_send_buffer_requests(vnic, &vnic->bufpages) == -ENOSPC)
			vnic_set_queue_full(vnic);
	}

	/*
	 * If there are no messages then this is not an error.  It
	 * just means that we've finished processing the queue.
	 */
	if (err == -ENOENT)
		err = 0;
 done:
	/* We will now have made space in the dom0->domU queue if we can */
	if (queue_was_full)
		vnic_set_queue_not_full(vnic);

	if (err != 0) {
		EPRINTK("%s returned %d\n", __FUNCTION__, err);
		netfront_accel_set_closing(vnic);
	}

 unlock_out:
	mutex_unlock(&vnic->vnic_mutex);
	return;
}