struct hd_struct *get_part(char *name) { dev_t devt; int partno; struct disk_part_iter piter; struct gendisk *disk; struct hd_struct *part = NULL; if (!name) return part; devt = blk_lookup_devt("mmcblk0", 0); disk = get_gendisk(devt, &partno); if (!disk || get_capacity(disk) == 0) return 0; disk_part_iter_init(&piter, disk, 0); while ((part = disk_part_iter_next(&piter))) { if (part->info && !strcmp(part->info->volname, name)) { get_device(part_to_dev(part)); break; } } disk_part_iter_exit(&piter); return part; }
static int partinfo_show_proc(struct seq_file *m, void *v) { dev_t devt; int partno; struct disk_part_iter piter; struct gendisk *disk; struct hd_struct *part; u64 last = 0; devt = blk_lookup_devt("mmcblk0", 0); disk = get_gendisk(devt, &partno), seq_printf(m, "%-16s %-16s\t%-16s\n", "Name", "Start", "Size"); if (!disk || get_capacity(disk) == 0) return 0; disk_part_iter_init(&piter, disk, 0); seq_printf(m, "%-16s 0x%016llx\t0x%016llx\n", "pgpt", 0ULL, 512 * 1024ULL); while ((part = disk_part_iter_next(&piter))) { seq_printf(m, "%-16s 0x%016llx\t0x%016llx\n", part->info ? (char *)(part->info->volname) : "unknown", (u64)part->start_sect * 512, (u64)part->nr_sects * 512); last = (part->start_sect + part->nr_sects) * 512; } seq_printf(m, "%-16s 0x%016llx\t0x%016llx\n", "sgpt", last, 512 * 1024ULL); disk_part_iter_exit(&piter); return 0; }
/*
 * upgrade_proc_read() - proc read handler exposing mmcblk0 partition data.
 *
 * Builds a partition package for mmcblk0 and copies it to userspace via
 * simple_read_from_buffer().  Returns the number of bytes copied, 0 when
 * the disk is missing/empty, or -ENOMEM on allocation failure.
 */
static ssize_t upgrade_proc_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	dev_t devt;
	int partno;
	struct gendisk *disk;
	struct partition_package *package;
	int len;
	/*
	 * Fix: was declared size_t — assigning -ENOMEM to an unsigned type
	 * and returning it through ssize_t yields a huge positive value
	 * instead of an error.  ssize_t matches the return type.
	 */
	ssize_t ret;

	devt = blk_lookup_devt("mmcblk0", 0);
	disk = get_gendisk(devt, &partno);
	if (!disk || get_capacity(disk) == 0)
		return 0;

	package = alloc_partition_package(disk, &len);
	if (!package) {
		ret = -ENOMEM;
		part_err("upgrade_proc_read: fail to malloc package\n");
		goto fail_malloc;
	}
	get_partition_package(disk, package);
	ret = simple_read_from_buffer(buf, count, ppos, package, len);
	kfree(package);
fail_malloc:
	return ret;
}
/* * Loop through every CONFIG_MMC_BLOCK_MINORS'th minor device for * MMC_BLOCK_MAJOR, get the struct gendisk for each device. Returns * nr of found disks. Populate mmc_disks. */ static int scan_mmc_devices(struct gendisk *mmc_disks[]) { dev_t devnr; int i, j = 0, part; struct gendisk *mmc_devices[256 / CONFIG_MMC_BLOCK_MINORS]; memset(&mmc_devices, 0, sizeof(mmc_devices)); for (i = 0; i * CONFIG_MMC_BLOCK_MINORS < 256; i++) { devnr = MKDEV(MMC_BLOCK_MAJOR, i * CONFIG_MMC_BLOCK_MINORS); mmc_devices[i] = get_gendisk(devnr, &part); /* Invalid capacity of device, do not add to list */ if (!mmc_devices[i] || !get_capacity(mmc_devices[i])) continue; mmc_disks[j] = mmc_devices[i]; j++; if (j == PERF_MMC_HOSTS) break; } return j; }
/*
 * Module init: resolve the block device named by the (file-scope) 'device'
 * parameter, cache its gendisk/request queue in globals, record the init
 * timestamp, and install the block tracing hooks.
 *
 * NOTE(review): 'dev', 'disk', 'queue', 'dummy', 'init_time', 'device' and
 * the block_* hook slots are file-scope globals declared outside this view
 * — confirm their definitions.  Also note init returns 0 (success) even
 * when the disk lookup fails; presumably intentional best-effort, verify.
 */
static __init int start_module(void)
{
	dev = name_to_dev_t(device);		/* resolve "major:minor" / path to dev_t */
	disk = get_gendisk(dev, &dummy);
	if (!disk)
		return 0;			/* device not present: load as a no-op */
	queue = disk->queue;			/* cache the request queue for the hooks */
	getrawmonotonic(&init_time);		/* reference time for later deltas */
	/* Install the tracing callbacks. */
	block_fun = my_block_fun;
	block_requeue = my_block_requeue;
	block_comp = my_block_comp;
	return 0;
}
/*
 * NOTE: this cannot be called from interrupt context.
 *
 * But in interrupt context you should really have a struct
 * block_device anyway and use bdevname() above.
 */
const char *__bdevname(dev_t dev, char *buffer)
{
	int part;
	struct gendisk *disk = get_gendisk(dev, &part);

	/* No gendisk registered for this dev_t: emit a placeholder name. */
	if (!disk) {
		snprintf(buffer, BDEVNAME_SIZE, "unknown-block(%u,%u)",
			 MAJOR(dev), MINOR(dev));
		return buffer;
	}

	buffer = disk_name(disk, part, buffer);
	put_disk(disk);
	return buffer;
}
/* * Get partitioning information */ static struct partsfs_state *get_partitions_info(struct super_block *sb, int silent) { struct parsed_partitions *partitions; struct gendisk *disk; struct partsfs_state *state; int partno; int p; disk = get_gendisk(sb->s_bdev->bd_dev, &partno); if (!disk) { if (!silent) printk(KERN_WARNING "PARTSFS: Error getting partition information (get_gendisk failed)\n"); return NULL; } state = kzalloc(sizeof(struct partsfs_state), GFP_KERNEL); partitions = check_partition(disk, sb->s_bdev); if (IS_ERR(partitions) || partitions == NULL) { if (!silent) printk(KERN_WARNING "PARTSFS: Error getting partition information (check_partition failed)\n"); return NULL; } state->sector_size = bdev_logical_block_size(sb->s_bdev); state->capacity = get_capacity(disk); sb_set_blocksize(sb, state->sector_size); put_disk(disk); /* Count the partitions */ state->number_of_partitions = 0; state->last_partition = 0; for (p = 1; p < partitions->limit; p++) { if (partitions->parts[p].size != 0) { if (!silent) printk(KERN_WARNING "PARTSFS: Partition %d start: %llu size: %llu\n", p, (unsigned long long)partitions->parts[p].from, (unsigned long long)partitions->parts[p].size * state->sector_size); state->parts[p].from = partitions->parts[p].from; state->parts[p].size = partitions->parts[p].size; state->number_of_partitions++; state->last_partition = p; } } kfree(partitions); return state; }
static __init int start_module(void) { dev=name_to_dev_t("/dev/sda1"); // dev_t dev1=blk_lookup_devt("sda",1); printk(KERN_INFO "Major %d minor %d\n",MAJOR(dev),MINOR(dev)); disk = get_gendisk(dev,&dummy); printk(KERN_INFO "disk name %s\n",disk->disk_name); if(!disk) return 0; simple_tsk = kthread_run(simple_thread, NULL, "event-sample"); if (IS_ERR(simple_tsk)) return -1; // while(1) // { // } // } /* buf = (unsigned char*)vmalloc(0x800); memset( buf , 0xFE , 0x800 ); bio = bio_map_kern( disk->queue , buf , 0x400 , GFP_KERNEL ); if( IS_ERR(bio) ) { vfree(buf); return 0; } bio->bi_sector = 0; bio->bi_bdev = bdget_disk(disk,0); printk(" bi_bdev = %016lX\n",(unsigned long)(bio->bi_bdev)); printk(" bi_bdev->bd_disk = %s\n",(bio->bi_bdev->bd_disk->disk_name)); if(bio->bi_sector) printk(" sector %lld \n",bio->bi_sector); if(bio->bi_flags) printk("flags %lu\n",bio->bi_flags); if(bio->bi_rw) printk("rw %lu\n",bio->bi_rw); */// queue=disk->queue; // req=queue->boundary_rq; //printk(KERN_INFO "Pending requests:%d\n",(queue->nr_pending)); return 0; }
/*
 * Remove the gendisk backing @devt, provided it belongs to this driver's
 * major.  Returns 0 on success, -EINVAL if the block device or gendisk
 * cannot be found or the major does not match.
 */
int _lkl_disk_del_disk(__kernel_dev_t devt)
{
	struct block_device *bdev;
	struct gendisk *gd;
	int partno;
	int err = 0;

	bdev = bdget(devt);
	if (!bdev)
		return -EINVAL;

	gd = get_gendisk(new_decode_dev(devt), &partno);
	if (gd && gd->major == major)
		del_gendisk(gd);
	else
		err = -EINVAL;

	bdput(bdev);
	return err;
}
/**
 * software_resume - Resume from a saved hibernation image.
 *
 * This routine is called as a late initcall, when all devices have been
 * discovered and initialized already.
 *
 * The image reading code is called to see if there is a hibernation image
 * available for reading.  If that is the case, devices are quiesced and the
 * contents of memory is restored from the saved image.
 *
 * If this is successful, control reappears in the restored target kernel in
 * hibernation_snapshot() which returns to hibernate().  Otherwise, the routine
 * attempts to recover gracefully and make the kernel return to the normal mode
 * of operation.
 */
static int software_resume(void)
{
	int error, nr_calls = 0;

	/*
	 * If the user said "noresume".. bail out early.
	 */
	if (noresume || !hibernation_available())
		return 0;

	/*
	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
	 * is configured into the kernel.  Since the regular hibernate
	 * trigger path is via sysfs which takes a buffer mutex before
	 * calling hibernate functions (which take pm_mutex) this can
	 * cause lockdep to complain about a possible ABBA deadlock
	 * which cannot happen since we're in the boot code here and
	 * sysfs can't be invoked yet.  Therefore, we use a subclass
	 * here to avoid lockdep complaining.
	 */
	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);

	/* Resume device already known (e.g. set via kernel parameter). */
	if (swsusp_resume_device)
		goto Check_image;

	if (!strlen(resume_file)) {
		error = -ENOENT;
		goto Unlock;
	}

	pr_debug("Checking hibernation image partition %s\n", resume_file);

	/* Optional user-configured delay before touching the resume device. */
	if (resume_delay) {
		pr_info("Waiting %dsec before reading resume device ...\n",
			resume_delay);
		ssleep(resume_delay);
	}

	/* Check if the device is there */
	swsusp_resume_device = name_to_dev_t(resume_file);

	/*
	 * name_to_dev_t is ineffective to verify the partition if resume_file
	 * is in integer format (e.g. major:minor), so poll until the gendisk
	 * for that dev_t actually appears.
	 */
	if (isdigit(resume_file[0]) && resume_wait) {
		int partno;

		while (!get_gendisk(swsusp_resume_device, &partno))
			msleep(10);
	}

	if (!swsusp_resume_device) {
		/*
		 * Some device discovery might still be in progress; we need
		 * to wait for this to finish.
		 */
		wait_for_device_probe();

		if (resume_wait) {
			while ((swsusp_resume_device = name_to_dev_t(resume_file)) == 0)
				msleep(10);
			async_synchronize_full();
		}

		swsusp_resume_device = name_to_dev_t(resume_file);
		if (!swsusp_resume_device) {
			error = -ENODEV;
			goto Unlock;
		}
	}

 Check_image:
	pr_debug("Hibernation image partition %d:%d present\n",
		 MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));

	pr_debug("Looking for hibernation image.\n");
	error = swsusp_check();
	if (error)
		goto Unlock;

	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		swsusp_close(FMODE_READ);
		goto Unlock;
	}

	pm_prepare_console();
	error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
	if (error) {
		/* Notify only the callbacks that actually ran. */
		nr_calls--;
		goto Close_Finish;
	}

	pr_debug("Preparing processes for restore.\n");
	error = freeze_processes();
	if (error)
		goto Close_Finish;

	error = load_image_and_restore();
	thaw_processes();
 Finish:
	__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
	pm_restore_console();
	atomic_inc(&snapshot_device_available);
	/* For success case, the suspend path will release the lock */
 Unlock:
	mutex_unlock(&pm_mutex);
	pr_debug("Hibernation image not present or could not be loaded.\n");
	return error;
 Close_Finish:
	swsusp_close(FMODE_READ);
	goto Finish;
}
/**
 * software_resume - Resume from a saved hibernation image.
 *
 * This routine is called as a late initcall, when all devices have been
 * discovered and initialized already.
 *
 * The image reading code is called to see if there is a hibernation image
 * available for reading.  If that is the case, devices are quiesced and the
 * contents of memory is restored from the saved image.
 *
 * If this is successful, control reappears in the restored target kernel in
 * hibernation_snapshot() which returns to hibernate().  Otherwise, the routine
 * attempts to recover gracefully and make the kernel return to the normal mode
 * of operation.
 */
static int software_resume(void)
{
	int error;
	unsigned int flags;

	/*
	 * If the user said "noresume".. bail out early.
	 */
	if (noresume)
		return 0;

	/*
	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
	 * is configured into the kernel.  Since the regular hibernate
	 * trigger path is via sysfs which takes a buffer mutex before
	 * calling hibernate functions (which take pm_mutex) this can
	 * cause lockdep to complain about a possible ABBA deadlock
	 * which cannot happen since we're in the boot code here and
	 * sysfs can't be invoked yet.  Therefore, we use a subclass
	 * here to avoid lockdep complaining.
	 */
	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);

	/* Resume device already known (e.g. set via kernel parameter). */
	if (swsusp_resume_device)
		goto Check_image;

	if (!strlen(resume_file)) {
		error = -ENOENT;
		goto Unlock;
	}

	pr_debug("PM: Checking hibernation image partition %s\n", resume_file);

	/* Check if the device is there */
	swsusp_resume_device = name_to_dev_t(resume_file);

	/*
	 * name_to_dev_t is ineffective to verify the partition if resume_file
	 * is in integer format (e.g. major:minor), so poll until the gendisk
	 * for that dev_t actually appears.
	 */
	if (isdigit(resume_file[0]) && resume_wait) {
		int partno;

		while (!get_gendisk(swsusp_resume_device, &partno))
			msleep(10);
	}

	if (!swsusp_resume_device) {
		/*
		 * Some device discovery might still be in progress; we need
		 * to wait for this to finish.
		 */
		wait_for_device_probe();

		if (resume_wait) {
			while ((swsusp_resume_device = name_to_dev_t(resume_file)) == 0)
				msleep(10);
			async_synchronize_full();
		}

		/*
		 * We can't depend on SCSI devices being available after loading
		 * one of their modules until scsi_complete_async_scans() is
		 * called and the resume device usually is a SCSI one.
		 */
		scsi_complete_async_scans();

		swsusp_resume_device = name_to_dev_t(resume_file);
		if (!swsusp_resume_device) {
			error = -ENODEV;
			goto Unlock;
		}
	}

 Check_image:
	pr_debug("PM: Hibernation image partition %d:%d present\n",
		 MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));

	pr_debug("PM: Looking for hibernation image.\n");
	error = swsusp_check();
	if (error)
		goto Unlock;

	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		swsusp_close(FMODE_READ);
		goto Unlock;
	}

	pm_prepare_console();
	error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
	if (error)
		goto close_finish;

	error = usermodehelper_disable();
	if (error)
		goto close_finish;

	error = create_basic_memory_bitmaps();
	if (error)
		goto close_finish;

	pr_debug("PM: Preparing processes for restore.\n");
	error = prepare_processes();
	if (error) {
		swsusp_close(FMODE_READ);
		goto Done;
	}

	pr_debug("PM: Loading hibernation image.\n");

	error = swsusp_read(&flags);
	swsusp_close(FMODE_READ);
	if (!error)
		/* On success this does not return: control resumes in the
		 * restored kernel image. */
		hibernation_restore(flags & SF_PLATFORM_MODE);

	printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
	swsusp_free();
	thaw_processes();
 Done:
	free_basic_memory_bitmaps();
	usermodehelper_enable();
 Finish:
	pm_notifier_call_chain(PM_POST_RESTORE);
	pm_restore_console();
	atomic_inc(&snapshot_device_available);
	/* For success case, the suspend path will release the lock */
 Unlock:
	mutex_unlock(&pm_mutex);
	pr_debug("PM: Hibernation image not present or could not be loaded.\n");
	return error;
 close_finish:
	swsusp_close(FMODE_READ);
	goto Finish;
}