/*
 * Runtime-resume a SCSI device: let the block layer prepare the request
 * queue, run the driver's runtime_resume callback (if one is registered),
 * then report the outcome back to the block layer.
 */
static int sdev_runtime_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	const struct dev_pm_ops *pm = NULL;
	int ret = 0;

	if (dev->driver)
		pm = dev->driver->pm;

	blk_pre_runtime_resume(sdev->request_queue);
	if (pm && pm->runtime_resume)
		ret = pm->runtime_resume(dev);
	blk_post_runtime_resume(sdev->request_queue, ret);

	return ret;
}
/*
 * Quiesce the SCSI device; on success, forward the suspend request to the
 * attached driver's legacy ->suspend method (if any).  Returns 0 or a
 * negative errno.
 */
static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
{
	int ret;

	ret = scsi_device_quiesce(to_scsi_device(dev));
	if (ret == 0) {
		struct device_driver *drv = dev->driver;

		if (drv && drv->suspend)
			ret = drv->suspend(dev, msg);
	}

	dev_dbg(dev, "scsi suspend: %d\n", ret);
	return ret;
}
/** * proc_print_scsidevice - return data about this host * @dev: A scsi device * @data: &struct seq_file to output to. * * Description: prints Host, Channel, Id, Lun, Vendor, Model, Rev, Type, * and revision. */ static int proc_print_scsidevice(struct device *dev, void *data) { struct scsi_device *sdev; struct seq_file *s = data; int i; if (!scsi_is_sdev_device(dev)) goto out; sdev = to_scsi_device(dev); seq_printf(s, "Host: scsi%d Channel: %02d Id: %02d Lun: %02d\n Vendor: ", sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); for (i = 0; i < 8; i++) { if (sdev->vendor[i] >= 0x20) seq_printf(s, "%c", sdev->vendor[i]); else seq_printf(s, " "); } seq_printf(s, " Model: "); for (i = 0; i < 16; i++) { if (sdev->model[i] >= 0x20) seq_printf(s, "%c", sdev->model[i]); else seq_printf(s, " "); } seq_printf(s, " Rev: "); for (i = 0; i < 4; i++) { if (sdev->rev[i] >= 0x20) seq_printf(s, "%c", sdev->rev[i]); else seq_printf(s, " "); } seq_printf(s, "\n"); seq_printf(s, " Type: %s ", scsi_device_type(sdev->type)); seq_printf(s, " ANSI SCSI revision: %02x", sdev->scsi_level - (sdev->scsi_level > 1)); if (sdev->scsi_level == 2) seq_printf(s, " CCS\n"); else seq_printf(s, "\n"); out: return 0; }
/*
 * Runtime-suspend a SCSI device: ask the block layer for permission to
 * suspend the request queue, invoke the driver's runtime_suspend callback
 * (if any), then tell the block layer how it went.
 */
static int sdev_runtime_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = blk_pre_runtime_suspend(sdev->request_queue);
	if (ret)
		return ret;

	ret = (pm && pm->runtime_suspend) ? pm->runtime_suspend(dev) : 0;
	blk_post_runtime_suspend(sdev->request_queue, ret);

	return ret;
}
static int sdev_runtime_suspend(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL; struct scsi_device *sdev = to_scsi_device(dev); int err; if (sdev->request_queue->dev) return sdev_blk_runtime_suspend(sdev, cb); err = scsi_dev_type_suspend(dev, cb); if (err == -EAGAIN) pm_schedule_suspend(dev, jiffies_to_msecs( round_jiffies_up_relative(HZ/10))); return err; }
/**
 * edd_match_scsidev - test whether a SCSI device matches an EDD entry
 * @dev: device being visited by the bus iterator; assumed to embed a
 *       &struct scsi_device (no scsi_is_sdev_device() check is made here —
 *       presumably the caller iterates only SCSI devices; verify at caller)
 * @d: really a &struct edd_match_data; carries the EDD device to match
 *     against and receives the matching scsi_device in @data->sd
 *
 * Compares the device's channel against the EDD PCI interface path and its
 * id/lun against the EDD SCSI device path.  On a match the scsi_device is
 * stored in the match data so the caller can stop iterating.
 *
 * Returns 1 if a match is found, 0 if not.
 */
static int edd_match_scsidev(struct device * dev, void * d)
{
	struct edd_match_data * data = (struct edd_match_data *)d;
	struct edd_info *info = edd_dev_get_info(data->edev);
	struct scsi_device * sd = to_scsi_device(dev);

	if (info) {
		if ((sd->channel == info->params.interface_path.pci.channel) &&
		    (sd->id == info->params.device_path.scsi.id) &&
		    (sd->lun == info->params.device_path.scsi.lun)) {
			data->sd = sd;
			return 1;
		}
	}
	return 0;
}
/*
 * Resume a SCSI device through the supplied callback, then unquiesce it.
 * On success the runtime-PM status is forcibly reset to "active" via the
 * disable/set_active/enable sequence.
 */
static int scsi_dev_type_resume(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = cb(dev, pm);
	scsi_device_resume(to_scsi_device(dev));
	dev_dbg(dev, "scsi resume: %d\n", ret);

	if (ret == 0) {
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return ret;
}
static int scsi_bus_resume_common(struct device *dev, int (*cb)(struct device *, const struct dev_pm_ops *)) { async_func_t fn; if (!scsi_is_sdev_device(dev)) fn = NULL; else if (cb == do_scsi_resume) fn = async_sdev_resume; else if (cb == do_scsi_thaw) fn = async_sdev_thaw; else if (cb == do_scsi_restore) fn = async_sdev_restore; else fn = NULL; /* * Forcibly set runtime PM status of request queue to "active" to * make sure we can again get requests from the queue (see also * blk_pm_peek_request()). * * The resume hook will correct runtime PM status of the disk. */ if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev)) blk_set_runtime_active(to_scsi_device(dev)->request_queue); if (fn) { async_schedule_domain(fn, dev, &scsi_sd_pm_domain); /* * If a user has disabled async probing a likely reason * is due to a storage enclosure that does not inject * staggered spin-ups. For safety, make resume * synchronous as well in that case. */ if (strncmp(scsi_scan_type, "async", 5) != 0) async_synchronize_full_domain(&scsi_sd_pm_domain); } else { pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); } return 0; }
static int sandman_suspend_disk(char *name) { int err = 0; bool manage_start_stop = false; struct device *device = NULL; struct device *parent = NULL; struct scsi_device *sdev = NULL; device = sandman_find_dev(name); if (IS_ERR(device)) { printk(KERN_WARNING "[Sandman] %s: no disk found.\n", name); err = PTR_ERR(device); goto fail; } /* * Ensure that manage_start_stop is enabled */ parent = device->parent; sdev = to_scsi_device(parent); if (!sdev) return 1; if (sdev->manage_start_stop) manage_start_stop = true; else sdev->manage_start_stop = 1; err = pm_runtime_force_suspend(parent); sdev->manage_start_stop = manage_start_stop ? 1 : 0; if (err) goto fail; if (parent->power.runtime_status != RPM_SUSPENDED) { err = 2; goto fail; } return 0; fail: return err; }
static int sm_probe(struct device* dev) { struct scsi_device *sdp = to_scsi_device(dev); int error = -ENODEV; if(sdp->type != TYPE_MEM) goto OUT; LogPath(); struct gendisk* gd = NULL; gd = alloc_disk(1); if(!gd) { Log("[Error] alloc_disk failed."); return -1; } gd->major = SM_MAJOR; gd->first_minor = 0; gd->fops = &sm_fops; gd->private_data = sdp; sprintf(gd->disk_name,"sm-scsi"); gd->queue = sdp->request_queue; gd->driverfs_dev = &sdp->sdev_gendev; //gd->flags = GENHD_FL_DRIVERFS; if(sdp->removable) gd->flags |= GENHD_FL_REMOVABLE; //dev->p->driver_data = (void*)gd; dev_set_drvdata(dev,(void*)gd); sm_spinup_mem_disk(sdp); sm_read_capacity(sdp); set_capacity(gd,g_mem_capacity >> 9); blk_queue_prep_rq(sdp->request_queue,sm_prep_fn); add_disk(gd); return 0; OUT: return error; }
/*
 * Runtime-PM idle callback for the SCSI bus.  LUs whose request queue is
 * tied to a PM-aware device are autosuspended after the inactivity delay;
 * everything else is suspended immediately.
 */
static int scsi_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "scsi_runtime_idle\n");

	/* Insert hooks here for targets, hosts, and transport classes */

	if (scsi_is_sdev_device(dev)) {
		struct scsi_device *sdev = to_scsi_device(dev);

		if (sdev->request_queue->dev) {
			pm_runtime_mark_last_busy(dev);
			return pm_runtime_autosuspend(dev);
		}
	}

	return pm_runtime_suspend(dev);
}
/* * TODO: need some interface so we can set trespass values */ static int clariion_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; struct scsi_device *sdev; struct scsi_dh_data *scsi_dh_data; struct clariion_dh_data *h; int i, found = 0; unsigned long flags; if (!scsi_is_sdev_device(dev)) return 0; sdev = to_scsi_device(dev); if (action == BUS_NOTIFY_ADD_DEVICE) { for (i = 0; clariion_dev_list[i].vendor; i++) { if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor, strlen(clariion_dev_list[i].vendor)) && !strncmp(sdev->model, clariion_dev_list[i].model, strlen(clariion_dev_list[i].model))) { found = 1; break; } } if (!found) goto out; scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n", CLARIION_NAME); goto out; } scsi_dh_data->scsi_dh = &clariion_dh; h = (struct clariion_dh_data *) scsi_dh_data->buf; h->default_sp = CLARIION_UNBOUND_LU; h->current_sp = CLARIION_UNBOUND_LU; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); sdev->scsi_dh_data = scsi_dh_data; spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME); try_module_get(THIS_MODULE); } else if (action == BUS_NOTIFY_DEL_DEVICE) { if (sdev->scsi_dh_data == NULL || sdev->scsi_dh_data->scsi_dh != &clariion_dh) goto out; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); scsi_dh_data = sdev->scsi_dh_data; sdev->scsi_dh_data = NULL; spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", CLARIION_NAME); kfree(scsi_dh_data); module_put(THIS_MODULE); } out: return 0; }
/* Output routine for the sysfs max_sectors file */ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); return sprintf(buf, "%u\n", sdev->request_queue->max_sectors); }
int fips_fmp_init(struct device *dev) { struct ufs_fmp_work *work; struct device_node *dev_node; struct platform_device *pdev_ufs; struct device *dev_ufs; struct ufs_hba *hba; struct Scsi_Host *host; struct inode *inode; struct scsi_device *sdev; struct super_block *sb; unsigned long blocksize; unsigned char blocksize_bits; sector_t self_test_block; fmode_t fmode = FMODE_WRITE | FMODE_READ; work = kmalloc(sizeof(*work), GFP_KERNEL); if (!work) { dev_err(dev, "Fail to alloc fmp work buffer\n"); return -ENOMEM; } dev_node = of_find_compatible_node(NULL, NULL, "samsung,exynos-ufs"); if (!dev_node) { dev_err(dev, "Fail to find exynos ufs device node\n"); goto out; } pdev_ufs = of_find_device_by_node(dev_node); if (!pdev_ufs) { dev_err(dev, "Fail to find exynos ufs pdev\n"); goto out; } dev_ufs = &pdev_ufs->dev; hba = dev_get_drvdata(dev_ufs); if (!hba) { dev_err(dev, "Fail to find hba from dev\n"); goto out; } host = hba->host; sdev = to_scsi_device(dev_ufs); work->host = host; work->sdev = sdev; work->devt = find_devt_for_selftest(dev); if (!work->devt) { dev_err(dev, "Fail to find devt for self test\n"); return -ENODEV; } work->bdev = blkdev_get_by_dev(work->devt, fmode, NULL); if (IS_ERR(work->bdev)) { dev_err(dev, "Fail to open block device\n"); return -ENODEV; } inode = work->bdev->bd_inode; sb = inode->i_sb; blocksize = sb->s_blocksize; blocksize_bits = sb->s_blocksize_bits; self_test_block = (i_size_read(inode) - (blocksize * SF_BLK_OFFSET)) >> blocksize_bits; work->sector = self_test_block; dev_set_drvdata(dev, work); return 0; out: if (work) kfree(work); return -ENODEV; }