Code example #1
File: rd.c Project: nhanh0/hah
static int rd_open(struct inode * inode, struct file * filp)
{
	int unit = DEVICE_NR(inode->i_rdev);

#ifdef CONFIG_BLK_DEV_INITRD
	if (unit == INITRD_MINOR) {
		if (!initrd_start) return -ENODEV;
		initrd_users++;
		filp->f_op = &initrd_fops;
		return 0;
	}
#endif

	if (unit >= NUM_RAMDISKS)
		return -ENXIO;

	/*
	 * Immunize device against invalidate_buffers() and prune_icache().
	 */
	if (rd_bdev[unit] == NULL) {
		rd_bdev[unit] = bdget(kdev_t_to_nr(inode->i_rdev));
		rd_bdev[unit]->bd_openers++;
		rd_bdev[unit]->bd_inode->i_mapping->a_ops = &ramdisk_aops;
	}

	return 0;
}
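
rd_open() deliberately never drops the reference it takes, so the device stays immune to invalidate_buffers() and prune_icache(), as the comment says. Most other callers pair the two calls instead; a minimal sketch of that pattern, assuming the same 2.4-era helpers used above (kdev_t_to_nr(), bdget(), bdput()):

/* Minimal sketch (assumed 2.4-era API): pin the struct block_device for a
 * device number, use it, then drop the reference taken by bdget(). */
static int touch_bdev(kdev_t dev)
{
	struct block_device *bdev = bdget(kdev_t_to_nr(dev));

	if (!bdev)
		return -ENOMEM;		/* bdget() allocates; it can fail */

	/* ... inspect bdev->bd_openers, bdev->bd_inode, etc. ... */

	bdput(bdev);			/* release the reference */
	return 0;
}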
Code example #2
void test_bdget(dev_t dev)
{
	struct block_device *bdev;

	bdev = bdget(dev);

	printk("test_bdget %d bdev %p\n", dev, bdev);
}
Code example #3
File: swapfile.c Project: acassis/emlinux-ssd1935
/*
 * Find the swap type that corresponds to given device (if any).
 *
 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 * from 0, in which the swap header is expected to be located.
 *
 * This is needed for the suspend to disk (aka swsusp).
 */
int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
	struct block_device *bdev = NULL;
	int i;

	if (device)
		bdev = bdget(device);

	spin_lock(&swap_lock);
	for (i = 0; i < nr_swapfiles; i++) {
		struct swap_info_struct *sis = swap_info + i;

		if (!(sis->flags & SWP_WRITEOK))
			continue;

		if (!bdev) {
			if (bdev_p)
				*bdev_p = sis->bdev;

			spin_unlock(&swap_lock);
			return i;
		}
		if (bdev == sis->bdev) {
			struct swap_extent *se;

			se = list_entry(sis->extent_list.next,
					struct swap_extent, list);
			if (se->start_block == offset) {
				if (bdev_p)
					*bdev_p = sis->bdev;

				spin_unlock(&swap_lock);
				bdput(bdev);
				return i;
			}
		}
	}
	spin_unlock(&swap_lock);
	if (bdev)
		bdput(bdev);

	return -ENODEV;
}
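
This lookup exists for the suspend-to-disk path mentioned in the comment; a hypothetical caller (function and parameter names invented for illustration) might look like this:

/* Hypothetical caller (names invented): map a resume device number and
 * header offset onto a swap slot, as the hibernation code does. */
static int resolve_resume_swap(dev_t resume_dev, sector_t resume_block,
			       struct block_device **resume_bdev)
{
	int type = swap_type_of(resume_dev, resume_block, resume_bdev);

	if (type < 0)
		return type;	/* -ENODEV: no matching active swap area */

	/* 'type' indexes swap_info[]; *resume_bdev points at its device */
	return type;
}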
Code example #4
File: disk.c Project: luciang/lkl-linux-2.6
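/*
 * Tear down an LKL disk: bdget() pins the block_device for the device
 * number, get_gendisk() looks up the gendisk (rejecting disks whose major
 * does not match this driver's), del_gendisk() removes it, and bdput()
 * drops the reference again.
 */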
int _lkl_disk_del_disk(__kernel_dev_t devt)
{
	struct block_device *bdev;
	struct gendisk *gd;
	int ret = 0, partno;

	bdev = bdget(devt);
	if (!bdev)
		return -EINVAL;

	gd = get_gendisk(new_decode_dev(devt), &partno);
	if (!gd || gd->major != major) {
		ret = -EINVAL;
		goto out;
	}

	del_gendisk(gd);

out:
	bdput(bdev);
	return ret;
}
Code example #5
File: block_dev.c Project: JBTech/ralink_rt5350
int set_blocksize(kdev_t dev, int size)
{
	int oldsize;
	struct block_device *bdev;

	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < get_hardsect_size(dev))
		return -EINVAL;

	/* No blocksize array? Implies hardcoded BLOCK_SIZE */
	if (!blksize_size[MAJOR(dev)]) {
		if (size == BLOCK_SIZE)
			return 0;
		return -EINVAL;
	}

	oldsize = blksize_size[MAJOR(dev)][MINOR(dev)];
	if (oldsize == size)
		return 0;

	if (!oldsize && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return 0;
	}

	/* Ok, we're actually changing the blocksize.. */
	bdev = bdget(dev);
	sync_buffers(dev, 2);
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;
	bdev->bd_inode->i_blkbits = blksize_bits(size);
	kill_bdev(bdev);
	bdput(bdev);
	return 0;
}
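
In this 2.4-era tree, set_blocksize() is what a filesystem calls before reading its metadata at a non-default block size; a hedged sketch of such a caller (the myfs_* name is invented):

/* Hypothetical 2.4-style mount helper (name invented): switch the device
 * to 1 KiB blocks before reading the superblock. */
static int myfs_set_blocksize(struct super_block *sb)
{
	int err = set_blocksize(sb->s_dev, BLOCK_SIZE);	/* 1024 bytes */

	if (err)
		return err;	/* -EINVAL: unsupported or smaller than the hardware sector */

	sb->s_blocksize = BLOCK_SIZE;
	sb->s_blocksize_bits = BLOCK_SIZE_BITS;
	return 0;
}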
Code example #6
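/*
 * Cache the block_device behind a device special inode: if inode->i_bdev is
 * already set, just take another reference on it; otherwise look the device
 * up with bdget(), point i_bdev and i_mapping at it, and link the inode onto
 * the bdev's bd_inodes list.
 */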
int bd_acquire(struct inode *inode)
{
	struct block_device *bdev;
	spin_lock(&bdev_lock);
	if (inode->i_bdev) {
		atomic_inc(&inode->i_bdev->bd_count);
		spin_unlock(&bdev_lock);
		return 0;
	}
	spin_unlock(&bdev_lock);
	bdev = bdget(kdev_t_to_nr(inode->i_rdev));
	if (!bdev)
		return -ENOMEM;
	spin_lock(&bdev_lock);
	if (!inode->i_bdev) {
		inode->i_bdev = bdev;
		inode->i_mapping = bdev->bd_inode->i_mapping;
		list_add(&inode->i_devices, &bdev->bd_inodes);
	} else if (inode->i_bdev != bdev)
		BUG();
	spin_unlock(&bdev_lock);
	return 0;
}
Code example #7
File: stackbd-orig.c Project: rosrez/drv2
static struct block_device *stackbd_bdev_open(char dev_path[])
{
    /* Open underlying device */
    struct block_device *bdev_raw = lookup_bdev(dev_path);
    printk("Opened %s\n", dev_path);

    if (IS_ERR(bdev_raw))
    {
        printk("stackbd: error opening raw device <%lu>\n", PTR_ERR(bdev_raw));
        return NULL;
    }

    if (!bdget(bdev_raw->bd_dev))
    {
        printk("stackbd: error bdget()\n");
        return NULL;
    }


    /* FIXME:VER */
    /*    if (blkdev_get(bdev_raw, STACKBD_BDEV_MODE, &stackbd))*/
    if (blkdev_get(bdev_raw, STACKBD_BDEV_MODE))
    {
        printk("stackbd: error blkdev_get()\n");
        bdput(bdev_raw);
        return NULL;
    }

    if (bd_claim(bdev_raw, &stackbd)) {
        printk("stackbd: error bd_claim()\n");
        bdput(bdev_raw);
        return NULL;
    }

    return bdev_raw;
}
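
A teardown counterpart is not shown in this file; a hypothetical one, assuming the same pre-2.6.38 bd_claim()/two-argument blkdev_get() API used above, would undo the claims in reverse order:

/* Hypothetical counterpart (API level assumed from the code above): undo
 * bd_claim() and blkdev_get() in reverse order at teardown. */
static void stackbd_bdev_close(struct block_device *bdev_raw)
{
    bd_release(bdev_raw);                     /* releases the bd_claim() */
    blkdev_put(bdev_raw, STACKBD_BDEV_MODE);  /* matches blkdev_get() */
    /* the extra reference taken by bdget() in stackbd_bdev_open() would
     * also need a matching bdput() if it is kept */
}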
Code example #8
int pio_write_corefile(int offset) {
	/*
	 * The original idea here is to write to the "alternate
	 * application partition".  This has some problems though,
	 * spec. with developer boxes.  For now, we're going to
	 * assume this won't be called from a dev build.  (This is so
	 * broken it bothers me, we could write it to swap -32M -1M)
	 *
	 * The second broken thing here is we don't account for
	 * actual size of the memory of the box in question.  We
	 * assume it's 32Meg.  This is somewhat easily overcome by
	 * just checking the config variables.  However, I'm loathe
	 * to do that at this point since having variable size input
	 * just risks adding a new bug to the system.  (Something I don't
	 * want to do at this point in the release cycle)
	 *
	 * This is a *very basic* implementation of what we want to do.
	 * It starts at 0x80000000, goes for 32MB writing out to alternate
	 * application at either "offset 0", or "offset 1".  Offset 0 is
	 * at the beginning of the partition.  Offset 1 is 32MB into the
	 * partition.  (ah, now you see why I don't want to deal with
	 * variable sizes)
	 *
	 * Cache flushing is not required as we're doing it PIO, which just
	 * pulls from cache.  Of course, we're taking the kernel centric
	 * as opposed to hardware centric view of memory.  This is reasonable
	 * because really, the core is just a bunch of kernel memory
	 * structures for the most part.
	 * 
	 */
	struct block_device *bdev;
	struct gendisk *gp;
	struct scsi_disk *sdkp;
	struct scsi_device *sdev;
	struct ata_port *ap;
	struct ata_device *dev;
	
	int altpartition;
	unsigned long secoffset = (CORE_SIZE / 512) * offset;
	u32 csum; /* Currently unused */
	static int core_written = 0;

	/* Only 0 or 1 are valid offset values */
	if(!(offset == 0 || offset == 1))
		return -EINVAL;

	/* If the root is sda4, then altap is sda7 and vice-versa */
	if(ROOT_DEV == MKDEV(SCSI_DISK0_MAJOR, 4))
		altpartition = 7;
	else if(ROOT_DEV == MKDEV(SCSI_DISK0_MAJOR, 7))
		altpartition = 4;
	else
		return -ENODEV;

        if( (bdev = bdget(MKDEV(SCSI_DISK0_MAJOR, 0))) != NULL )
        { 
                if( (gp = bdev->bd_disk) != NULL){
                        if ((sdkp = scsi_disk_get(gp)) == NULL){
                                return -ENXIO;
                        }
                }
                else{
                        return -ENODEV;
                }
        }
        else{
                return -ENODEV;
        }

        if((sdev = scsi_dev_get(sdkp)) == NULL)
        {
                return -ENXIO;
        }

        ap = ata_shost_to_port(sdev->host);
	dev = ata_scsi_find_dev(ap, sdev);	  	   

        if( ap == NULL || dev == NULL ){
                return -ENXIO;
        }
        
	if(core_written != 0) {
		printk("Core already written\n");
		return -EINVAL;
	}
	
	core_written = 1;
	
	/* Partition must be at least twice a CORE */
	/* disk_get_part() increments ref count so need to release */
	struct hd_struct *hd_info = disk_get_part(gp, altpartition);
	if(hd_info->nr_sects < ((2 * CORE_SIZE) / 512)) {
	    /* release hd_info */
	    disk_put_part(hd_info);
		return -ENODEV;
	}
	
	if(initdrive) {
                /* Issue software reset, disable ide interrupts */
		if(pio_reset(ap) < 0) {
		    /* release hd_info */
		    disk_put_part(hd_info);
			return -ENODEV;
		}
		initdrive=0;
	}

	secoffset += hd_info->start_sect;

	/* release hd_info */
	disk_put_part(hd_info);

	return pio_write_buffer(dev, ap, secoffset, (const u8*)CORE_START,
		       CORE_SIZE, &csum);
}
Code example #9
/* Returns number of sectors written */
int pio_write_buffer_to_swap(const u8* buf, unsigned int size, int secoffset,
	u32 *csum) {
	struct block_device *bdev = NULL;
	struct gendisk *gp = NULL;
	struct scsi_disk *sdkp = NULL;
	struct scsi_device *sdev = NULL;
	struct ata_port *ap = NULL;
	struct ata_device *dev = NULL;
        int written_sect;
	
	unsigned long tsector;

        if( (bdev = bdget(MKDEV(SWAPFS_MAJOR, SWAPFS_MINOR))) != NULL )
        { 
                if( (gp = bdev->bd_disk) != NULL){
                        if ((sdkp = scsi_disk_get(gp)) == NULL){
                                return -ENXIO;
                        }
                }
                else{
                        return -ENODEV;
                }
        }
        else{
                return -ENODEV;
        }

        if((sdev = scsi_dev_get(sdkp)) == NULL)
        {
                return -ENXIO;
        }

        ap = ata_shost_to_port(sdev->host);
	dev = ata_scsi_find_dev(ap, sdev);	  	   

        if( ap == NULL || dev == NULL ){
                return -ENXIO;
        }

	/*
	 * We adjust secoffset to write to the last 1MB of the device,
	 * this is because we don't want to rebuild swap mostly, and
	 * because we can use "lspanic" to grab the last tivo panic.
	 *
	 * We will track which one we've logged by bit-inverting
	 * the checksum.
	 *
	 */
    /* disk_get_part() increments ref count so need to release */
    struct hd_struct *hd_info = disk_get_part(gp, SWAPFS_MINOR);
	/* Partition must be at least 2 MB */
	if(hd_info->nr_sects < (2* 1024 * 1024 / 512)){
		goto HD_INFO_ERROR;
        }

	/* Offset by 1MB from end of partition */
	secoffset += hd_info->nr_sects - (1 * 1024 * 1024 /512);

	/* Make sure partition is sane size-wise */

	if(hd_info->nr_sects < secoffset){
	    goto HD_INFO_ERROR;
        }
	if(hd_info->nr_sects - secoffset < ((size + 511)/512)) {
	    goto HD_INFO_ERROR;
	}
	
	if(initdrive) {
                /* Issue software reset, disable ide interrupts */
		if(pio_reset(ap) < 0) { 
		    goto HD_INFO_ERROR;
		}
	
                initdrive=0;
	}

	tsector = hd_info->start_sect + secoffset;

        written_sect = pio_write_buffer(dev, ap, tsector, buf, size, csum);

	scsi_disk_put(sdkp);
	/* release hd_info */
    disk_put_part(hd_info);
	return written_sect;

HD_INFO_ERROR:
    /* release hd_info */
    disk_put_part(hd_info);
    return -ENODEV;
}
Code example #10
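/*
 * Recompute the per-target sd_flags: for every SCSI-disk major and whole-disk
 * minor, pin the block_device with bdget(), open it read-only, and when its
 * gendisk belongs to the target being examined, optionally revalidate the
 * disk and refresh bd_inode's size, then mark the target in use if anything
 * else still holds it open (bd_openers > 1).
 */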
void refresh_sd_flags(PVBUS_EXT vbus_ext)
{
	static int major[] = { SCSI_DISK0_MAJOR, SCSI_DISK1_MAJOR, SCSI_DISK2_MAJOR, SCSI_DISK3_MAJOR, 
				SCSI_DISK4_MAJOR, SCSI_DISK5_MAJOR, SCSI_DISK6_MAJOR, SCSI_DISK7_MAJOR, 
				SCSI_DISK8_MAJOR, SCSI_DISK9_MAJOR, SCSI_DISK10_MAJOR, SCSI_DISK11_MAJOR, 
				SCSI_DISK12_MAJOR, SCSI_DISK13_MAJOR, SCSI_DISK14_MAJOR, SCSI_DISK15_MAJOR, 0 };
	int id;
	Scsi_Device *SDptr;

	vbus_ext->needs_refresh = 0;
	
	for (id=0; id<osm_max_targets; id++) {
		
		SDptr = scsi_device_lookup(vbus_ext->host, 0, id, 0);
		
		vbus_ext->sd_flags[id] &= ~SD_FLAG_IN_USE;
	
		if (SDptr) {
			int i, minor;
			for (i=0; major[i]; i++) {
				for (minor=0; minor<=240; minor+=16) {
					struct block_device *bdev = bdget(MKDEV(major[i], minor));
					if (bdev &&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
						blkdev_get(bdev, FMODE_READ,NULL)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
						blkdev_get(bdev, FMODE_READ)
#else 
						blkdev_get(bdev, FMODE_READ, 0 __BDEV_RAW)
#endif
						==0) {
						if (bdev->bd_disk && bdev->bd_disk->driverfs_dev==&SDptr->sdev_gendev) {
							if (vbus_ext->sd_flags[id] & SD_FLAG_REVALIDATE) {
								if (bdev->bd_disk->fops->revalidate_disk)
									bdev->bd_disk->fops->revalidate_disk(bdev->bd_disk);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
								mutex_lock(&bdev->bd_inode->i_mutex);
#else 
								down(&bdev->bd_inode->i_sem);
#endif
								i_size_write(bdev->bd_inode, (loff_t)get_capacity(bdev->bd_disk)<<9);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
								mutex_unlock(&bdev->bd_inode->i_mutex);
#else 
								up(&bdev->bd_inode->i_sem);
#endif
								vbus_ext->sd_flags[id] &= ~SD_FLAG_REVALIDATE;
							}
							if (bdev->bd_openers>1)
								vbus_ext->sd_flags[id] |= SD_FLAG_IN_USE;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
							blkdev_put(bdev, FMODE_READ);
#else 
							blkdev_put(bdev __BDEV_RAW);
#endif
							goto next;
						}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
						blkdev_put(bdev, FMODE_READ);
#else 
						blkdev_put(bdev __BDEV_RAW);
#endif
					}
				}
			}
next:
			scsi_device_put(SDptr);
		}
	}
}
Code example #11
File: blktrace.c Project: swrite/ali_kernel
static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		ret = -EBUSY;
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}
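
The attribute group registered as "trace" lands in each block device's sysfs directory; a small userspace sketch (the /sys/block/<dev>/trace/enable path is assumed from that group name) that toggles the enable attribute handled by sysfs_blk_trace_attr_store():

/* Userspace sketch (sysfs path assumed): flip the 'enable' attribute that
 * sysfs_blk_trace_attr_store() handles for a given block device. */
#include <stdio.h>

static int blk_trace_enable(const char *disk, int on)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/trace/enable", disk);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", on);
	return fclose(f);
}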
Code example #12
File: raw.c Project: sarnobat/knoppix
/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
			unsigned int command, unsigned long arg)
{
	struct raw_config_request rq;
	struct raw_device_data *rawdev;
	int err = 0;

	switch (command) {
	case RAW_SETBIND:
	case RAW_GETBIND:

		/* First, find out which raw minor we want */

		if (copy_from_user(&rq, (void *) arg, sizeof(rq))) {
			err = -EFAULT;
			goto out;
		}

		if (rq.raw_minor < 0 || rq.raw_minor >= MAX_RAW_MINORS) {
			err = -EINVAL;
			goto out;
		}
		rawdev = &raw_devices[rq.raw_minor];

		if (command == RAW_SETBIND) {
			dev_t dev;

			/*
			 * This is like making block devices, so demand the
			 * same capability
			 */
			if (!capable(CAP_SYS_ADMIN)) {
				err = -EPERM;
				goto out;
			}

			/*
			 * For now, we don't need to check that the underlying
			 * block device is present or not: we can do that when
			 * the raw device is opened.  Just check that the
			 * major/minor numbers make sense.
			 */

			dev = MKDEV(rq.block_major, rq.block_minor);
			if ((rq.block_major == 0 && rq.block_minor != 0) ||
					MAJOR(dev) != rq.block_major ||
					MINOR(dev) != rq.block_minor) {
				err = -EINVAL;
				goto out;
			}

			down(&raw_mutex);
			if (rawdev->inuse) {
				up(&raw_mutex);
				err = -EBUSY;
				goto out;
			}
			if (rawdev->binding) {
				bdput(rawdev->binding);
				module_put(THIS_MODULE);
			}
			if (rq.block_major == 0 && rq.block_minor == 0) {
				/* unbind */
				rawdev->binding = NULL;
			} else {
				rawdev->binding = bdget(dev);
				if (rawdev->binding == NULL)
					err = -ENOMEM;
				else
					__module_get(THIS_MODULE);
			}
			up(&raw_mutex);
		} else {
			struct block_device *bdev;

			down(&raw_mutex);
			bdev = rawdev->binding;
			if (bdev) {
				rq.block_major = MAJOR(bdev->bd_dev);
				rq.block_minor = MINOR(bdev->bd_dev);
			} else {
				rq.block_major = rq.block_minor = 0;
			}
			up(&raw_mutex);
			if (copy_to_user((void *)arg, &rq, sizeof(rq))) {
				err = -EFAULT;
				goto out;
			}
		}
		break;
	default:
		err = -EINVAL;
		break;
	}
out:
	return err;
}
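
Userspace drives this interface through the raw control node; a hedged sketch using the uapi definitions from <linux/raw.h> (the /dev/rawctl path is the conventional node name, verify it on the target system):

/* Userspace sketch: bind raw device N to a block device, mirroring the
 * RAW_SETBIND branch above. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raw.h>		/* struct raw_config_request, RAW_SETBIND */

static int bind_raw(int raw_minor, int block_major, int block_minor)
{
	struct raw_config_request rq = {
		.raw_minor   = raw_minor,
		.block_major = block_major,
		.block_minor = block_minor,
	};
	int fd = open("/dev/rawctl", O_RDWR);
	int err;

	if (fd < 0)
		return -1;
	err = ioctl(fd, RAW_SETBIND, &rq);
	close(fd);
	return err;
}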
Code example #13
File: passthrough.c Project: Dolbysh/gestion_projet
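/*
 * Build the passthrough gendisk and resolve its two backing devices.  Note
 * that the target_name argument is currently ignored: target_dev and
 * target_ssd are pinned with bdget(MKDEV(8,16)) and bdget(MKDEV(8,32))
 * instead of the commented-out blkdev_get_by_path(), and the queue limits
 * are copied from target_dev's request queue.
 */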
static int setup_passthrough_device(struct pt_dev *dev, const char *target_name)
{
	struct request_queue *q;

	dev->queue = blk_alloc_queue(GFP_KERNEL);
	if (dev->queue == NULL)
		return -1;

	blk_queue_make_request(dev->queue, passthrough_make_request);
	//blk_queue_flush(dev->queue, REQ_FLUSH | REQ_FUA);

	dev->gd = alloc_disk(1);
	if (! dev->gd) {

		return -1;
	}

	dev->gd->major = passthrough->major;
	dev->gd->first_minor = 0;
	dev->gd->fops = &pt_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	dev->gd->flags |= GENHD_FL_EXT_DEVT;

	dev->target_dev = bdget(MKDEV(8,16));//blkdev_get_by_path(target_name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, dev);
	if(!dev->target_dev)
	{
		return -1;
	}

	dev->target_ssd = bdget(MKDEV(8,32));//blkdev_get_by_path(target_name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, dev);
	if(!dev->target_ssd)
	{
		return -1;
	}

	if(!dev->target_dev->bd_disk)
	{
		return -1;
	}

	if(!dev->target_ssd->bd_disk)
	{
		return -1;
	}

	q = bdev_get_queue(dev->target_dev);
	if(!q)
	{
		return -1;
	}

	dev->gd->queue->limits.max_hw_sectors	= q->limits.max_hw_sectors;
	dev->gd->queue->limits.max_sectors	= q->limits.max_sectors;
	dev->gd->queue->limits.max_segment_size	= q->limits.max_segment_size;
//	dev->gd->queue->limits.max_segments	= q->limits.max_segments;
	dev->gd->queue->limits.logical_block_size  = 512;
	dev->gd->queue->limits.physical_block_size = 512;
	set_bit(QUEUE_FLAG_NONROT, &dev->gd->queue->queue_flags);

	snprintf (dev->gd->disk_name, 32, "passthrough");

	set_capacity(dev->gd, get_capacity(dev->target_dev->bd_disk));

	add_disk(dev->gd);

	return 1;
}
Code example #14
File: blkmtd.c Project: nhanh0/hah
/* Startup */
static int __init init_blkmtd(void)
{
  struct file *file = NULL;
  struct inode *inode;
  mtd_raw_dev_data_t *rawdevice = NULL;
  int maj, min;
  int i, blocksize, blocksize_bits;
  loff_t size = 0;
  int readonly = 0;
  int erase_size = CONFIG_MTD_BLKDEV_ERASESIZE;
  kdev_t rdev;
  int err;
  int mode;
  int totalsize = 0, total_sectors = 0;
  int regions;

  mtd_info = NULL;

  // Check args
  if(device == 0) {
    printk("blkmtd: error, missing `device' name\n");
    return 1;
  }

  if(ro)
    readonly = 1;

  if(erasesz)
    erase_size = erasesz;

  DEBUG(1, "blkmtd: got device = `%s' erase size = %dK readonly = %s\n", device, erase_size, readonly ? "yes" : "no");
  // Get a handle on the device
  mode = (readonly) ? O_RDONLY : O_RDWR;
  file = filp_open(device, mode, 0);
  if(IS_ERR(file)) {
    DEBUG(2, "blkmtd: open_namei returned %ld\n", PTR_ERR(file));
    return 1;
  }
  
  /* determine if this is a block device and if so get its major and minor
     numbers */
  inode = file->f_dentry->d_inode;
  if(!S_ISBLK(inode->i_mode)) {
    printk("blkmtd: %s not a block device\n", device);
    filp_close(file, NULL);
    return 1;
  }
  rdev = inode->i_rdev;
  //filp_close(file, NULL);
  DEBUG(1, "blkmtd: found a block device major = %d, minor = %d\n",
	 MAJOR(rdev), MINOR(rdev));
  maj = MAJOR(rdev);
  min = MINOR(rdev);

  if(maj == MTD_BLOCK_MAJOR) {
    printk("blkmtd: attempting to use an MTD device as a block device\n");
    return 1;
  }

  DEBUG(1, "blkmtd: devname = %s\n", bdevname(rdev));
  blocksize = BLOCK_SIZE;

  if(bs) {
    blocksize = bs;
  } else {
    if (blksize_size[maj] && blksize_size[maj][min]) {
      DEBUG(2, "blkmtd: blksize_size = %d\n", blksize_size[maj][min]);
      blocksize = blksize_size[maj][min];
    }
  }
  i = blocksize;
  blocksize_bits = 0;
  while(i != 1) {
    blocksize_bits++;
    i >>= 1;
  }

  if(count) {
    size = count;
  } else {
    if (blk_size[maj]) {
      size = ((loff_t) blk_size[maj][min] << BLOCK_SIZE_BITS) >> blocksize_bits;
    }
  }
  total_sectors = size;
  size *= blocksize;
  totalsize = size;
  DEBUG(1, "blkmtd: size = %ld\n", (long int)size);

  if(size == 0) {
    printk("blkmtd: cant determine size\n");
    return 1;
  }
  rawdevice = (mtd_raw_dev_data_t *)kmalloc(sizeof(mtd_raw_dev_data_t), GFP_KERNEL);
  if(rawdevice == NULL) {
    err = -ENOMEM;
    goto init_err;
  }
  memset(rawdevice, 0, sizeof(mtd_raw_dev_data_t));
  // get the block device
  rawdevice->binding = bdget(kdev_t_to_nr(MKDEV(maj, min)));
  err = blkdev_get(rawdevice->binding, mode, 0, BDEV_RAW);
  if (err) {
    goto init_err;
  }
  rawdevice->totalsize = totalsize;
  rawdevice->total_sectors = total_sectors;
  rawdevice->sector_size = blocksize;
  rawdevice->sector_bits = blocksize_bits;
  rawdevice->readonly = readonly;

  DEBUG(2, "sector_size = %d, sector_bits = %d\n", rawdevice->sector_size, rawdevice->sector_bits);

  mtd_info = (struct mtd_info *)kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
  if (mtd_info == NULL) {
    err = -ENOMEM;
    goto init_err;
  }
  memset(mtd_info, 0, sizeof(*mtd_info));

  // Setup the MTD structure
  mtd_info->name = "blkmtd block device";
  if(readonly) {
    mtd_info->type = MTD_ROM;
    mtd_info->flags = MTD_CAP_ROM;
    mtd_info->erasesize = erase_size << 10;
  } else {
    mtd_info->type = MTD_RAM;
    mtd_info->flags = MTD_CAP_RAM;
    mtd_info->erasesize = erase_size << 10;
  }
  mtd_info->size = size;
  mtd_info->erase = blkmtd_erase;
  mtd_info->read = blkmtd_read;
  mtd_info->write = blkmtd_write;
  mtd_info->sync = blkmtd_sync;
  mtd_info->point = 0;
  mtd_info->unpoint = 0;

  mtd_info->priv = rawdevice;
  regions = calc_erase_regions(NULL, erase_size << 10, size);
  DEBUG(1, "blkmtd: init: found %d erase regions\n", regions);
  mtd_info->eraseregions = kmalloc(regions * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
  if(mtd_info->eraseregions == NULL) {
  }
  mtd_info->numeraseregions = regions;
  calc_erase_regions(mtd_info->eraseregions, erase_size << 10, size);

  /* setup the page cache info */
  INIT_LIST_HEAD(&rawdevice->as.clean_pages);
  INIT_LIST_HEAD(&rawdevice->as.dirty_pages);
  INIT_LIST_HEAD(&rawdevice->as.locked_pages);
  rawdevice->as.nrpages = 0;
  rawdevice->as.a_ops = &blkmtd_aops;
  rawdevice->as.host = inode;
  rawdevice->as.i_mmap = NULL;
  rawdevice->as.i_mmap_shared = NULL;
  spin_lock_init(&rawdevice->as.i_shared_lock);
  rawdevice->as.gfp_mask = GFP_KERNEL;
  rawdevice->file = file;

  file->private_data = rawdevice;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
   mtd_info->module = THIS_MODULE;			
#endif
   if (add_mtd_device(mtd_info)) {
     err = -EIO;
     goto init_err;
   }
   init_waitqueue_head(&thr_wq);
   init_waitqueue_head(&mtbd_sync_wq);
   DEBUG(3, "blkmtd: init: kernel task @ %p\n", write_queue_task);
   DEBUG(2, "blkmtd: init: starting kernel task\n");
   kernel_thread(write_queue_task, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
   DEBUG(2, "blkmtd: init: started\n");
   printk("blkmtd loaded: version = %s using %s erase_size = %dK %s\n", VERSION, device, erase_size, (readonly) ? "(read-only)" : "");
   return 0;

 init_err:
   if(rawdevice) {	/* was if(!rawdevice), which would dereference a NULL pointer */
     if(rawdevice->binding) 
       blkdev_put(rawdevice->binding, BDEV_RAW);

     kfree(rawdevice);
     rawdevice = NULL;
   }
   if(mtd_info) {
     if(mtd_info->eraseregions)
       kfree(mtd_info->eraseregions);
     kfree(mtd_info);
     mtd_info = NULL;
   }
   return err;
}