Example #1
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;

	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = request_complete;

	submit_bio(rw, &bio);
	generic_unplug_device(bdev_get_queue(bdev));
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}
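The callback request_complete() is not included in this listing. For the bi_end_io signature this snippet assumes (kernels between roughly 2.6.24 and 3.x), a minimal handler just fires the completion stashed in bi_private; the sketch below is inferred from the call site, not copied from the source project:

static void request_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}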
Example #2
static int bootstrap2(void *arg)
{
	dprintf(SPEW, "top of bootstrap2()\n");

	arch_init();

	// initialize the dpc system
#if WITH_LIB_DPC
	dpc_init();
#endif

	// XXX put this somewhere else
#if WITH_LIB_BIO
	bio_init();
#endif
#if WITH_LIB_FS
	fs_init();
#endif

	// initialize the rest of the platform
	dprintf(SPEW, "initializing platform\n");
	platform_init();

	// initialize the target
	dprintf(SPEW, "initializing target\n");
	target_init();

	dprintf(SPEW, "calling apps_init()\n");
	apps_init();

	return 0;
}
Example #3
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from
 *
 * Description:
 *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
 *   If %__GFP_WAIT is set then we will block on the internal pool waiting
 *   for a &struct bio to become free.
 *
 *   allocate bio and iovecs from the memory pools specified by the
 *   bio_set structure.
 **/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);

	if (likely(bio)) {
		struct bio_vec *bvl = NULL;

		bio_init(bio);
		if (likely(nr_iovecs)) {
			unsigned long uninitialized_var(idx);

			bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
			if (unlikely(!bvl)) {
				mempool_free(bio, bs->bio_pool);
				bio = NULL;
				goto out;
			}
			bio->bi_flags |= idx << BIO_POOL_OFFSET;
			bio->bi_max_vecs = bvec_nr_vecs(idx);
		}
		bio->bi_io_vec = bvl;
	}
out:
	return bio;
}
Example #4
static int block_read(const char *user_dev_path, /* Path to rpmb device */
		char *read_buff, /* User buffer */
		size_t size) /* Size of data to read (in bytes) */
{
	int i = 0, index = 0;
	int err;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	int end_sect;

	bdev = blkdev_get_by_path(user_dev_path,
				  FMODE_READ, block_read);

	if (IS_ERR(bdev)) {
		pr_err("failed to get block device %s (%ld)\n",
		      user_dev_path, PTR_ERR(bdev));
		return -ENODEV;
	}

	page = virt_to_page(bio_buff);

	end_sect = (size - 1) / 512;

	for (i = 0; i <= end_sect; i++) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		bio.bi_sector = 0;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = emmc_rpmb_bio_complete;
		submit_bio(READ, &bio);
		wait_for_completion(&complete);
		if (!test_bit(BIO_UPTODATE, &bio.bi_flags)) {
			err = -EIO;
			goto out_blkdev;
		}

		memcpy(read_buff + index, bio_buff, 512);
		index += 512;
	}

	err = size;

out_blkdev:
	blkdev_put(bdev, FMODE_READ);

	return err;
}
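block_read() above and block_write() in Example #8 rely on a file-scope bounce buffer bio_buff and a completion callback that this listing omits. Plausible supporting definitions, stated here as assumptions rather than the project's actual code:

/* One-sector bounce buffer. bv_offset is 0 and the page comes from
 * virt_to_page(bio_buff), so the buffer must be page-aligned and must
 * not live in vmalloc space. */
static char bio_buff[512] __aligned(PAGE_SIZE);

static void emmc_rpmb_bio_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}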
Example #5
void  main_init( void )
{
  int   sts;
  
  sts = get_reset_reason();
  main_status = MAIN_STS_NO_SNS_ERR;
  main_is_enter_stby_mode = 0;
  
  bio_init();
  clk_init();
  uart_init();
  dbg_wait();
  
#ifdef FEATURE_WDOG_TRIG
  IWDG_Enable();
  bio_led_ctrl( BIO_LED_IX_POWER, 0, 0, 0 );
#else // FEATURE_WDOG_TRIG
  dbg_out( "\r\n**********==> Watchdog Disabled!!!!!\r\n" );
  bio_led_ctrl( BIO_LED_IX_POWER, 500, 500, BIO_LED_ALWAYS_WINK );
#endif  // FEATURE_WDOG_TRIG
  
  dbg_out_pool_const( UART_DBG_MSG_ENTER3 );
  dbg_out( "================== 0x%02X =================\r\n", sts );
  dbg_out( "==== Welcome to %s  Ver %d.%02X ====\r\n",
           MAIN_TIT_STR, MAIN_VER_MAJ, MAIN_VER_MIN );
  dbg_out( "=====   i=%d, si=%d, li=%d, f=%d, d=%d   =====\r\n",
           sizeof( int ), sizeof( short int ),
           sizeof( long  int ),sizeof( float ),sizeof( double ) );
  dbg_out( "=====  %dMHz %s  %s  =====\r\n",
           SystemCoreClock / 1000000, __DATE__, __TIME__ );
  dbg_out_pool_const( UART_DBG_MSG_EQ_LINE );
  dbg_wait();
  
  view_reset_reason( sts );
  eep_param_init();
  IWDG_ReloadCounter();
  
  u3_ctrl.sw_baud = 115200;
  uart3_init();
  sns_init();
  adc_init();
  zb_init();
  encb_init();
  fnd_init();
  indc_init();
  
  bio_set_fan_pwm_base_freq( eep_hw_info.fan_base_f );
  cmd_rx_ptr = u1_ctrl.rx_bptr;
  
  // set monitor timer
  clk_set_timer( &monit_timer, MAIN_MONITOR_MS, MAIN_MONITOR_MS,
                 CLK_SIGS_MONITOR );
  
} // end of main_init()
Example #6
static void mmc_panic_erase(void)
{
	int i = 0;
	int err;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	bdev = lookup_bdev(ctx->devpath);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR DRVNAME "failed to look up device %s (%ld)\n",
		       ctx->devpath, PTR_ERR(bdev));
		return;
	}
	err = blkdev_get(bdev, FMODE_WRITE);
	if (err) {
		printk(KERN_ERR DRVNAME "failed to open device %s (%d)\n",
		       ctx->devpath, err);
		return;
	}
	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);

	while (i < bdev->bd_part->nr_sects) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		if (bdev->bd_part->nr_sects - i >= 8) {
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			bio_vec.bv_len = (bdev->bd_part->nr_sects - i) * 512;
			bio.bi_size = (bdev->bd_part->nr_sects - i) * 512;
			i = bdev->bd_part->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);

	return;
}
Example #7
bio_t* make_bio()
{
    bio_t *bio = NULL;

    bio = bio_cachep->kmem_ops->get_obj(bio_cachep);
    if (bio)    /* guard against a failed cache allocation */
        bio_init(bio);

    return bio;
}
Example #8
static int block_write(const char *user_dev_path, /* Path to rpmb device node */
		const char *write_buff, /* buffer to write to rpmb */
		size_t size, /* size of data to write (in bytes) */
		int flags) /* REQ_META flags for Reliable writes */
{
	int i = 0, index = 0;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	int end_sect;

	bdev = blkdev_get_by_path(user_dev_path,
				  FMODE_WRITE, block_write);

	if (IS_ERR(bdev)) {
		pr_err("failed to get block device %s (%ld)\n",
		      user_dev_path, PTR_ERR(bdev));
		return -ENODEV;
	}

	page = virt_to_page(bio_buff);

	end_sect = (size - 1) / 512;

	for (i = 0; i <= end_sect; i++) {
		/* Copy data from user buffer to bio buffer */
		memcpy(bio_buff, write_buff + index, 512);
		index += 512;

		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		/* Set to 0 because the addr is part of RPMB data frame */
		bio.bi_sector = 0;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = emmc_rpmb_bio_complete;
		submit_bio(WRITE | flags, &bio);
		wait_for_completion(&complete);
	}

	blkdev_put(bdev, FMODE_WRITE);

	return 0;
}
Example #9
static void mmc_panic_erase(void)
{
	int i = 0;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	struct device *dev = part_to_dev(drv_ctx.hd);

	if (!ctx->hd || !ctx->mmc_panic_ops)
		goto out_err;

	bdev = blkdev_get_by_dev(dev->devt, FMODE_WRITE, NULL);
	if (IS_ERR(bdev)) {
		pr_err("apanic: open device failed with %ld\n", PTR_ERR(bdev));
		goto out_err;
	}
	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);

	while (i < ctx->hd->nr_sects) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		if (ctx->hd->nr_sects - i >= 8) {
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			bio_vec.bv_len = (ctx->hd->nr_sects - i) * 512;
			bio.bi_size = (ctx->hd->nr_sects - i) * 512;
			i = ctx->hd->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);
out_err:
	return;
}
Example #10
static void mmc_panic_erase(void)
{
	int i = 0;
	dev_t devid;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	devid = MKDEV(ctx->mmchd->major, ctx->mmchd->first_minor +
		ctx->mmchd->partno);
	bdev = open_by_devnum(devid, FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "apanic: open device failed with %ld\n",
			PTR_ERR(bdev));
		goto out_err;
	}
	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);

	while (i < ctx->mmchd->nr_sects) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		if (ctx->mmchd->nr_sects - i >= 8) {
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			bio_vec.bv_len = (ctx->mmchd->nr_sects - i) * 512;
			bio.bi_size = (ctx->mmchd->nr_sects - i) * 512;
			i = ctx->mmchd->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);
out_err:
	return;
}
Example #11
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from. If %NULL, just use kmalloc
 *
 * Description:
 *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
 *   If %__GFP_WAIT is set then we will block on the internal pool waiting
 *   for a &struct bio to become free. If a %NULL @bs is passed in, we will
 *   fall back to just using @kmalloc to allocate the required memory.
 *
 *   Note that the caller must set ->bi_destructor on successful return
 *   of a bio, to do the appropriate freeing of the bio once the reference
 *   count drops to zero.
 **/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
    struct bio_vec *bvl = NULL;
    struct bio *bio = NULL;
    unsigned long idx = 0;
    void *p = NULL;

    if (bs) {
        p = mempool_alloc(bs->bio_pool, gfp_mask);
        if (!p)
            goto err;
        bio = p + bs->front_pad;
    } else {
        bio = kmalloc(sizeof(*bio), gfp_mask);
        if (!bio)
            goto err;
    }

    bio_init(bio);

    if (unlikely(!nr_iovecs))
        goto out_set;

    if (nr_iovecs <= BIO_INLINE_VECS) {
        bvl = bio->bi_inline_vecs;
        nr_iovecs = BIO_INLINE_VECS;
    } else {
        bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
        if (unlikely(!bvl))
            goto err_free;

        nr_iovecs = bvec_nr_vecs(idx);
    }
    bio->bi_flags |= idx << BIO_POOL_OFFSET;
    bio->bi_max_vecs = nr_iovecs;
out_set:
    bio->bi_io_vec = bvl;

    return bio;

err_free:
    if (bs)
        mempool_free(p, bs->bio_pool);
    else
        kfree(bio);
err:
    return NULL;
}
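The note above obliges the caller to install a destructor before the bio's reference count can safely drop. A hedged sketch of that contract (my_bioset and the wrapper names are illustrative; bio_free() is the matching release primitive of this kernel era):

static struct bio_set *my_bioset;	/* assumed created with bioset_create() */

static void my_bio_destructor(struct bio *bio)
{
	bio_free(bio, my_bioset);
}

static struct bio *my_bio_alloc(gfp_t gfp, int nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(gfp, nr_vecs, my_bioset);

	if (bio)	/* fulfil the contract stated in the comment above */
		bio->bi_destructor = my_bio_destructor;
	return bio;
}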
Example #12
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
 *   able to allocate a bio. This is due to the mempool guarantees. To make this
 *   work, callers must never allocate more than 1 bio at a time from this pool.
 *   Callers that need to allocate more than 1 bio must always submit the
 *   previously allocated bio for IO before attempting to allocate a new one.
 *   Failure to do so can cause deadlocks under memory pressure.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	unsigned front_pad;
	unsigned inline_vecs;
	unsigned long idx = BIO_POOL_NONE;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		p = mempool_alloc(bs->bio_pool, gfp_mask);
		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio);

	if (nr_iovecs > inline_vecs) {
		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
		if (unlikely(!bvl))
			goto err_free;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_flags |= idx << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
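The "one bio at a time" rule in the comment above is easy to get wrong. A hedged sketch of the compliant pattern (my_bs, my_end_io and the parameters are illustrative, not from any project quoted here): each bio is submitted before the next allocation, so under memory pressure the mempool's reserved element is always on its way back.

static void write_pages(struct block_device *bdev, struct page **pages,
			int nr, struct bio_set *my_bs,
			bio_end_io_t *my_end_io)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, my_bs);

		bio->bi_bdev = bdev;
		bio->bi_sector = i * (PAGE_SIZE >> 9);
		bio->bi_end_io = my_end_io;
		bio_add_page(bio, pages[i], PAGE_SIZE, 0);
		/* submit before looping: the mempool guarantee only holds
		 * while no other unsubmitted bio from my_bs is held */
		submit_bio(WRITE, bio);
	}
}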
Example #13
/**
 * bio_kmalloc - allocate a bio for I/O using kmalloc()
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 *
 * Description:
 *   Allocate a new bio with @nr_iovecs bvecs.  If @gfp_mask contains
 *   %__GFP_WAIT, the allocation is guaranteed to succeed.
 *
 **/
struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
{
	struct bio *bio;

	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
		      gfp_mask);
	if (unlikely(!bio))
		return NULL;

	bio_init(bio);
	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bio->bi_inline_vecs;
	bio->bi_destructor = bio_kmalloc_destructor;

	return bio;
}
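A hedged caller sketch for bio_kmalloc(), reusing the Example #1-style completion callback; the function name and parameters are illustrative. bio_put() drops the last reference, at which point the bio_kmalloc_destructor set above frees the bio:

static int read_one_page(struct block_device *bdev, sector_t sector,
			 struct page *page)
{
	struct completion done;
	struct bio *bio = bio_kmalloc(GFP_NOIO, 1);
	int err;

	if (!bio)
		return -ENOMEM;
	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	init_completion(&done);
	bio->bi_private = &done;
	bio->bi_end_io = request_complete;	/* as sketched after Example #1 */
	submit_bio(READ, bio);
	wait_for_completion(&done);
	err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
	bio_put(bio);
	return err;
}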
Example #14
void saygoodbye(void)
{
	unsigned iodata[512/4];
	struct binder_io msg, reply;

	/* construct the binder_io */
	bio_init(&msg, iodata, sizeof(iodata), 4);
	bio_put_uint32(&msg, 0);  // strict mode header
	/* pack the call arguments (none for this command) */

	/* make the binder_call */
	if (binder_call(g_bs, &msg, &reply, g_goodbye_handle, GOODBYE_SVR_CMD_SAYGOODBYE))
		return;
	/* parse the return value out of the reply (none expected) */

	binder_done(g_bs, &msg, &reply);
}
Example #15
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;

	bio_init(&bio);
	bio.bi_max_vecs = 1;
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_bdev = bdev;
	bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
	bio.bi_iter.bi_size = PAGE_SIZE;

	return submit_bio_wait(rw, &bio);
}
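submit_bio_wait() packages exactly the completion dance that Examples #1, #4 and #24 open-code by hand. Schematically (trimmed from the kernel's block/bio.c of roughly the same era as this snippet; treat it as a sketch, not the authoritative source):

struct submit_bio_ret {
	struct completion event;
	int error;
};

static void submit_bio_wait_endio(struct bio *bio, int error)
{
	struct submit_bio_ret *ret = bio->bi_private;

	ret->error = error;
	complete(&ret->event);
}

int submit_bio_wait(int rw, struct bio *bio)
{
	struct submit_bio_ret ret;

	rw |= REQ_SYNC;
	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
	submit_bio(rw, bio);
	wait_for_completion(&ret.event);

	return ret.error;
}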
Example #16
int svcmgr_publish(struct binder_state *bs, uint32_t target,
		const char *name, void *ptr)
{
	int status;
	unsigned iodata[512/4];
	struct binder_io msg, reply;

	bio_init(&msg, iodata, sizeof(iodata), 4);
	bio_put_uint32(&msg, 0);  // strict mode header
	bio_put_string16_x(&msg, SVC_MGR_NAME);
	bio_put_string16_x(&msg, name);
	bio_put_obj(&msg, ptr);

	if (binder_call(bs, &msg, &reply, target, SVC_MGR_ADD_SERVICE))
		return -1;
	status = bio_get_uint32(&reply);
	binder_done(bs, &msg, &reply);
	return status;
}
Example #17
uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
{
    uint32_t handle;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);  // strict mode header
    bio_put_string16_x(&msg, SVC_MGR_NAME);
    bio_put_string16_x(&msg, name);

    if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))
        return 0;

    handle = bio_get_ref(&reply);

    if (handle)
        binder_acquire(bs, handle);

    binder_done(bs, &msg, &reply);

    return handle;
}
Example #18
static uint32_t get_handle_from_svcmgr(const uint16_t *str, binder_state *bs){
        struct binder_io msg;
        struct binder_io reply;
        unsigned rdata[256];
        int res;
        uint32_t ref;

        bio_init(&msg, rdata, sizeof(rdata), 0);
        bio_put_string16(&msg, svcmgr_id);
        bio_put_string16(&msg, str);

        res = binder_call(bs, &msg, &reply, 0,
                          BINDER_SERVICE_MANAGER, SVC_MGR_GET_SERVICE);

        if (res == -1)
                return res;

        ref = bio_get_ref(&reply);
        if (ref == 0)
                return -1;

        return ref;
}
Example #19
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from. If %NULL, just use kmalloc
 *
 * Description:
 *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
 *   If %__GFP_WAIT is set then we will block on the internal pool waiting
 *   for a &struct bio to become free. If a %NULL @bs is passed in, we will
 *   fall back to just using @kmalloc to allocate the required memory.
 *
 *   Note that the caller must set ->bi_destructor on successful return
 *   of a bio, to do the appropriate freeing of the bio once the reference
 *   count drops to zero.
 **/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	unsigned long idx = BIO_POOL_NONE;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	p = mempool_alloc(bs->bio_pool, gfp_mask);
	if (unlikely(!p))
		return NULL;
	bio = p + bs->front_pad;

	bio_init(bio);

	if (unlikely(!nr_iovecs))
		goto out_set;

	if (nr_iovecs <= BIO_INLINE_VECS) {
		bvl = bio->bi_inline_vecs;
		nr_iovecs = BIO_INLINE_VECS;
	} else {
		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
		if (unlikely(!bvl))
			goto err_free;

		nr_iovecs = bvec_nr_vecs(idx);
	}
out_set:
	bio->bi_flags |= idx << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
Example #20
void get_all_handles(struct binder_state *bs) {
        struct binder_io msg;
        struct binder_io reply;
        unsigned rdata[256];
        uint16_t *str;
        uint16_t *si;
        int res;
        size_t sz;
        unsigned service_count = 0;
        uint32_t handle;
        do {
                bio_init(&msg, rdata, sizeof(rdata), 0);
                bio_put_string16(&msg, svcmgr_id);
                /* put the cmd */
                bio_put_uint32(&msg, service_count);
                res = binder_call(bs, &msg, &reply,
                                  BINDER_SERVICE_MANAGER, SVC_MGR_LIST_SERVICES);
                if (res) {
                        str = bio_get_string16(&reply, &sz);
                        si = malloc((sz + 1) * sizeof(uint16_t));
                        if (!si) {
                                fprintf(stderr, "malloc failed\n");
                                res = -1;
                                break;
                        }
                        memcpy(si, str, sz * sizeof(uint16_t));
                        si[sz] = 0;
                        services[service_count] = si;
                        handle = get_handle_from_svcmgr(str, bs);
                        if (handle == -1) {
                                free(si);
                                break;
                        }
                        handles[service_count] = handle;
                        service_count++;
                }
        } while(res != -1 && service_count < MAX_SERVICES);
        num_handles_set = service_count;
}
Example #21
int sayhello_to(char *name)
{
	unsigned iodata[512/4];
	struct binder_io msg, reply;
	int ret;

	/* construct the binder_io */
	bio_init(&msg, iodata, sizeof(iodata), 4);
	bio_put_uint32(&msg, 0);  // strict mode header

	/* pack the call arguments */
	bio_put_string16_x(&msg, name);

	/* make the binder_call */
	if (binder_call(g_bs, &msg, &reply, g_hello_handle, HELLO_SVR_CMD_SAYHELLO_TO))
		return 0;

	/* parse the return value out of the reply */
	ret = bio_get_uint32(&reply);

	binder_done(g_bs, &msg, &reply);

	return ret;
}
Example #22
static void mmc_panic_notify_add(struct hd_struct *hd)
{
	struct apanic_data *ctx = &drv_ctx;
	struct panic_header *hdr = ctx->bounce;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	struct device *dev = part_to_dev(hd);

	if (!ctx->mmc_panic_ops) {
		pr_err("apanic: found apanic partition, but apanic not "
				"initialized\n");
		return;
	}

	bdev = blkdev_get_by_dev(dev->devt, FMODE_WRITE, NULL);
	if (IS_ERR(bdev)) {
		pr_err("apanic: open device failed with %ld\n",
		       PTR_ERR(bdev));
		goto out;
	}

	ctx->hd = hd;
	page = virt_to_page(ctx->bounce);

	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = 0;
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = mmc_bio_complete;
	submit_bio(READ, &bio);
	wait_for_completion(&complete);

	blkdev_put(bdev, FMODE_WRITE);

	pr_err("apanic: Bound to mmc block device '%s(%u:%u)'\n",
	       dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));

	if (hdr->magic != PANIC_MAGIC) {
		pr_info("apanic: No panic data available\n");
		goto out;
	}

	if (hdr->version != PHDR_VERSION) {
		pr_info("apanic: Version mismatch (%d != %d)\n",
		       hdr->version, PHDR_VERSION);
		goto out;
	}

	memcpy(&ctx->curr, hdr, sizeof(struct panic_header));

	pr_info("apanic: c(%u, %u) t(%u, %u) a(%u, %u)\n",
	       hdr->console_offset, hdr->console_length,
	       hdr->threads_offset, hdr->threads_length,
	       hdr->app_threads_offset, hdr->app_threads_length);

	if (hdr->console_length) {
		ctx->apanic_console = create_proc_entry("apanic_console",
							S_IFREG | S_IRUGO,
							NULL);
		if (!ctx->apanic_console)
			pr_err("apanic: failed creating procfile\n");
		else {
			ctx->apanic_console->read_proc =
			    apanic_proc_read_mmc;
			ctx->apanic_console->write_proc =
			    apanic_proc_write;
			ctx->apanic_console->size = hdr->console_length;
			ctx->apanic_console->data = (void *) 1;
		}
	}

	if (hdr->threads_length) {
		ctx->apanic_threads = create_proc_entry("apanic_threads",
							S_IFREG | S_IRUGO,
							NULL);
		if (!ctx->apanic_threads)
			pr_err("apanic: failed creating procfile\n");
		else {
			ctx->apanic_threads->read_proc =
			    apanic_proc_read_mmc;
			ctx->apanic_threads->write_proc =
			    apanic_proc_write;
			ctx->apanic_threads->size = hdr->threads_length;
			ctx->apanic_threads->data = (void *) 2;
		}
	}

	if (hdr->app_threads_length) {
		ctx->apanic_app_threads = create_proc_entry(
			"apanic_app_threads", S_IFREG | S_IRUGO, NULL);
		if (!ctx->apanic_app_threads)
			pr_err("%s: failed creating procfile\n", __func__);
		else {
			ctx->apanic_app_threads->read_proc
					= apanic_proc_read_mmc;
			ctx->apanic_app_threads->write_proc = apanic_proc_write;
			ctx->apanic_app_threads->size = hdr->app_threads_length;
			ctx->apanic_app_threads->data = (void *) 3;
		}

	}

out:
	ctx->apanic_annotate = create_proc_entry("apanic_annotate",
				S_IFREG | S_IRUGO | S_IWUSR, NULL);
	if (!ctx->apanic_annotate)
		printk(KERN_ERR "%s: failed creating procfile\n", __func__);
	else {
		ctx->apanic_annotate->read_proc = apanic_proc_read_annotation;
		ctx->apanic_annotate->write_proc = apanic_proc_annotate;
		ctx->apanic_annotate->size = 0;
		ctx->apanic_annotate->data = NULL;
	}

	return;
}
Example #23
int binder_parse(struct binder_state* bs, struct binder_io* bio,
                 uintptr_t ptr, size_t size, binder_handler func) {
  int r = 1;
  uintptr_t end = ptr + (uintptr_t) size;

  while (ptr < end) {
    uint32_t cmd = *(uint32_t*) ptr;
    ptr += sizeof(uint32_t);
#if TRACE
    fprintf(stderr, "%s:\n", cmd_name(cmd));
#endif
    switch (cmd) {
    case BR_NOOP:
      break;
    case BR_TRANSACTION_COMPLETE:
      break;
    case BR_INCREFS:
    case BR_ACQUIRE:
    case BR_RELEASE:
    case BR_DECREFS:
#if TRACE
      fprintf(stderr, "  %p, %p\n", (void*)ptr, (void*)(ptr + sizeof(void*)));
#endif
      ptr += sizeof(struct binder_ptr_cookie);
      break;
    case BR_TRANSACTION: {
      struct binder_transaction_data* txn = (struct binder_transaction_data*) ptr;
      if ((end - ptr) < sizeof(*txn)) {
        fprintf(stderr, "parse: txn too small!\n");
        return -1;
      }
      binder_dump_txn(txn);
      if (func) {
        unsigned rdata[256 / 4];
        struct binder_io msg;
        struct binder_io reply;
        int res;

        bio_init(&reply, rdata, sizeof(rdata), 4);
        bio_init_from_txn(&msg, txn);
        res = func(bs, txn, &msg, &reply);
        binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
      }
      ptr += sizeof(*txn);
      break;
    }
    case BR_REPLY: {
      struct binder_transaction_data* txn = (struct binder_transaction_data*) ptr;
      if ((end - ptr) < sizeof(*txn)) {
        fprintf(stderr, "parse: reply too small!\n");
        return -1;
      }
      binder_dump_txn(txn);
      if (bio) {
        bio_init_from_txn(bio, txn);
        bio = 0;
      } else {
        /* todo FREE BUFFER */
      }
      ptr += sizeof(*txn);
      r = 0;
      break;
    }
    case BR_DEAD_BINDER: {
      struct binder_death* death = (struct binder_death*)(uintptr_t)*(binder_uintptr_t*)ptr;
      ptr += sizeof(binder_uintptr_t);
      death->func(bs, death->ptr);
      break;
    }
    case BR_FAILED_REPLY:
      r = -1;
      break;
    case BR_DEAD_REPLY:
      r = -1;
      break;
    default:
      fprintf(stderr, "parse: OOPS %d\n", cmd);
      return -1;
    }
  }

  return r;
}
Example #24
static int apanic_proc_read(char *buffer, char **start, off_t offset,
                            int count, int *peof, void *dat)
{
	int i, index = 0;
	int err;
	int start_sect;
	int end_sect;
	size_t file_length;
	off_t file_offset;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	if (!count)
		return 0;

	mutex_lock(&drv_mutex);

	switch ((int) dat) {
	case PROC_APANIC_CONSOLE:
		file_length = ctx->curr.console_length;
		file_offset = ctx->curr.console_offset;
		break;
	case PROC_APANIC_THREADS:
		file_length = ctx->curr.threads_length;
		file_offset = ctx->curr.threads_offset;
		break;
	default:
		pr_err("bad apanic source (%d)\n", (int) dat);
		mutex_unlock(&drv_mutex);
		return -EINVAL;
	}

	if ((offset + count) > file_length) {
		mutex_unlock(&drv_mutex);
		return 0;
	}

	bdev = lookup_bdev(ctx->devpath);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR DRVNAME "failed to look up device %s (%ld)\n",
		       ctx->devpath, PTR_ERR(bdev));
		mutex_unlock(&drv_mutex);
		return -1;
	}
	err = blkdev_get(bdev, FMODE_READ);
	if (err) {
		printk(KERN_ERR DRVNAME "failed to open device %s (%d)\n",
		       ctx->devpath, err);
		mutex_unlock(&drv_mutex);
		return err;
	}
	page = virt_to_page(ctx->bounce);

	start_sect = (file_offset +  offset) / 512;
	end_sect = (file_offset + offset + count - 1) / 512;

	for (i = start_sect; i <= end_sect; i++) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		bio.bi_sector = i;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(READ, &bio);
		wait_for_completion(&complete);
		if (!test_bit(BIO_UPTODATE, &bio.bi_flags)) {
			err = -EIO;
			goto out_blkdev;
		}

		if ((i == start_sect) && ((file_offset + offset) % 512 != 0)) {
			/* first sect, may be the only sect */
			memcpy(buffer, ctx->bounce + (file_offset + offset)
				% 512, min((unsigned long)count,
				(unsigned long)
				(512 - (file_offset + offset) % 512)));
			index += min((unsigned long)count, (unsigned long)
				(512 - (file_offset + offset) % 512));
		} else if ((i == end_sect) && ((file_offset + offset + count)
			% 512 != 0)) {
			/* last sect */
			memcpy(buffer + index, ctx->bounce, (file_offset +
				offset + count) % 512);
		} else {
			/* middle sect */
			memcpy(buffer + index, ctx->bounce, 512);
			index += 512;
		}
	}

	*start = (char *)count;

	if ((offset + count) == file_length)
		*peof = 1;

	err = count;

out_blkdev:
	blkdev_put(bdev, FMODE_READ);

	mutex_unlock(&drv_mutex);
	return err;
}
Example #25
File: bio.c Project: 020gzh/linux
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	unsigned long idx = BIO_POOL_NONE;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */

		if (current->bio_list && !bio_list_empty(current->bio_list))
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio);

	if (nr_iovecs > inline_vecs) {
		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio_set_flag(bio, BIO_OWNS_VEC);
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_flags |= idx << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
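The closing advice above (use front_pad instead of a second mempool allocation per bio) is the pattern dm and md follow: embed the bio at the end of a private struct, pass its offset as front_pad, and recover the container with container_of(). A hedged sketch with illustrative names:

struct my_io {
	struct completion done;		/* per-bio private state */
	int error;
	struct bio bio;			/* must be the last member */
};

static struct bio_set *my_bs;

static int __init my_init(void)
{
	/* reserve room for struct my_io in front of every bio */
	my_bs = bioset_create(BIO_POOL_SIZE, offsetof(struct my_io, bio));
	return my_bs ? 0 : -ENOMEM;
}

static struct my_io *my_io_alloc(gfp_t gfp)
{
	struct bio *bio = bio_alloc_bioset(gfp, 1, my_bs);

	return bio ? container_of(bio, struct my_io, bio) : NULL;
}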
Example #26
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)
{
    int r = 1;
    uint32_t *end = ptr + (size / 4);

    while (ptr < end) {
        uint32_t cmd = *ptr++;
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
#if TRACE
            fprintf(stderr,"  %08x %08x\n", ptr[0], ptr[1]);
#endif
            ptr += 2;
            break;
        case BR_TRANSACTION: {
            struct binder_txn *txn = (void *) ptr;
            if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
                LOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data, res);
            }
            ptr += sizeof(*txn) / sizeof(uint32_t);
            break;
        }
        case BR_REPLY: {
            struct binder_txn *txn = (void*) ptr;
            if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
                LOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                    /* todo FREE BUFFER */
            }
            ptr += (sizeof(*txn) / sizeof(uint32_t));
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            struct binder_death *death = (void*) *ptr++;
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            LOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}
Example #27
int main(int argc, char **argv, char **envp) {
  array_t array;
  bio_t bin;
  char bin_data[8192];
  bio_t *bio;
  int fd;
  char *name;
  int opt;
  ssize_t r;
  struct stat st;
  char *type = 0;
  
  while ((opt = sgetopt(argc, argv, "t:")) != -1) {
    switch (opt) {
      case '?':
        usage();
      case 't':
        type = soptarg;
        break;
    }
  }
  argv += soptind;
  
  if (!type)
    usage();
    
  array_init(&array, 1);
  
  /* Set the initial buffered io pointer to use stdin and set the default name
   * to '-'. */
  bio = bio_0;
  name = "-";
  for (;;) {
    if (*argv) {
      /* If a path was given then check to see if it's a directory and skip
       * over it if it is. */
      if (stat(*argv, &st))
        err(1, "fatal");
      if (S_ISDIR(st.st_mode)) {
        errno = EISDIR;
        warn(*argv);
        argv++;
        continue;
      }
      
      /* Open the file for reading and construct our own buffered io struct to
       * use for reading in the file and hashing it.  Also set the default
       * buffered io pointer to our own and update the name to reflect the
       * current file. */
      fd = open_read(*argv);
      if (fd == -1)
        err(1, "fatal");
      bio_init(&bin, read, fd, bin_data, sizeof(bin_data));
      bio = &bin;
      name = *argv;
    }
    /* If there's no more arguments passed on the command line and the default
     * buffered io pointer doesn't point to stdin then that means all of the
     * files have been hashed and it's time to exit. */
    else if ((bio != bio_0))
      break;
    
    if (!strcmp(type, "md4"))
      handle_md4(&array, bio);
    else if (!strcmp(type, "md5"))
      handle_md5(&array, bio);
    else if (!strcmp(type, "sha1"))
      handle_sha1(&array, bio);
    else if (!strcmp(type, "sha256"))
      handle_sha256(&array, bio);
    else if (!strcmp(type, "sha512"))
      handle_sha512(&array, bio);
    else
      errx(1, "fatal: %s", "unknown type");
      
    bio_put_str(bio_1, array_start(&array));
    bio_put_str(bio_1, "  ");
    bio_put_str(bio_1, name);
    bio_put_str(bio_1, "\n");
    bio_flush(bio_1);
    
    array_trunc(&array);
    
    if (*argv)
      close(fd);
    if ((!*argv) || (!*++argv))
      break;
  }
  
  return 0;
}
Example #28
/*
	Callback called before the device reboots.
	If the reboot cause is a recovery, this function is going
	to write the strings "boot-recovery" and "recovery" into the
	Bootloader Control Block (BCB) so the Android Bootloader can
	launch the recovery image instead of the boot image.
*/
static int
reboot_notifier_callback(
struct notifier_block *nb, unsigned long val, void *v)
{
	struct raw_hd_struct *mmc_hd = NULL;
	struct bootloader_message *bcb;
	char *flashblock = NULL;

	if (mmc_misc_hd == NULL)
		goto clean_up;
	else
		mmc_hd = mmc_misc_hd;

	if (v == NULL) {
		board_sysconfig(SYSCFG_RESETREASON_SOFT_RESET, SYSCFG_DISABLE);
		goto clean_up;
	}

	if (!strncmp(v, "recovery", 8))	{
		int i = 0;
		dev_t devid;
		struct block_device *bdev;
		struct bio bio;
		struct bio_vec bio_vec;
		struct completion complete;
		struct page *page;

		/* Allocate a buffer to hold a block from 'misc' */
		flashblock = kmalloc(mmc_hd->nr_sects * 512, GFP_KERNEL);

		/* If the allocation fails, return */
		if (flashblock == NULL)
			goto clean_up;

		memset(flashblock, 0, mmc_hd->nr_sects * 512);

		/* read the BCB from the misc partition */
		/* read the entire block as we'll have to
		   rewrite it hence we need to erase */
		devid = MKDEV(mmc_hd->major, mmc_hd->first_minor + mmc_hd->partno);
		bdev = open_by_devnum(devid, FMODE_READ);
		if (IS_ERR(bdev)) {
			printk(KERN_ERR "misc: open device failed with %ld\n",
				PTR_ERR(bdev));
			goto clean_up;
		}
		page = virt_to_page(bounce);

		while (i < mmc_hd->nr_sects) {
			bio_init(&bio);
			bio.bi_io_vec = &bio_vec;
			bio_vec.bv_page = page;
			bio_vec.bv_offset = 0;
			bio.bi_vcnt = 1;
			bio.bi_idx = 0;
			bio.bi_bdev = bdev;
			bio.bi_sector = i;
			if (mmc_hd->nr_sects - i >= 8) {
				bio_vec.bv_len = PAGE_SIZE;
				bio.bi_size = PAGE_SIZE;
				i += 8;
			} else {
				bio_vec.bv_len = (mmc_hd->nr_sects - i) * 512;
				bio.bi_size = (mmc_hd->nr_sects - i) * 512;
				i = mmc_hd->nr_sects;
			}
			init_completion(&complete);
			bio.bi_private = &complete;
			bio.bi_end_io = mmc_bio_complete;
			submit_bio(READ, &bio);
			wait_for_completion(&complete);

			/* Copy the sectors just read out of the bounce
			 * buffer; bio.bi_sector still holds the first
			 * sector of this chunk ('i' has already advanced) */
			memcpy(flashblock + (bio.bi_sector * 512), bounce,
				bio.bi_size);
		}

		blkdev_put(bdev, FMODE_READ);
		printk(KERN_ERR "misc: Bound to mmc block device '(%d:%d)'\n",
			mmc_hd->major, mmc_hd->first_minor + mmc_hd->partno);

		/* BCB is stored at 0-bytes */
		bcb = (struct bootloader_message *)&flashblock[0];

		/* set bcb.command to "boot-recovery" */
		strcpy(bcb->command, "boot-recovery");

		/* clean bcb.status */
		memset(bcb->status, 0, sizeof(bcb->status));

		/* set bcb.recovery to "recovery" */
		strcpy(bcb->recovery, "recovery");

		/* Write the block back to 'misc'
		   First, erase it */
		devid = MKDEV(mmc_hd->major, mmc_hd->first_minor +
			mmc_hd->partno);
		bdev = open_by_devnum(devid, FMODE_WRITE);
		if (IS_ERR(bdev)) {
			printk(KERN_ERR "misc: open device failed with %ld\n",
				PTR_ERR(bdev));
			goto clean_up;
		}
		page = virt_to_page(bounce);
		i = 0;

		while (i < mmc_hd->nr_sects) {
			bio_init(&bio);
			bio.bi_io_vec = &bio_vec;
			bio_vec.bv_page = page;
			bio_vec.bv_offset = 0;
			bio.bi_vcnt = 1;
			bio.bi_idx = 0;
			bio.bi_bdev = bdev;
			bio.bi_sector = i;
			if (mmc_hd->nr_sects - i >= 8) {
				bio_vec.bv_len = PAGE_SIZE;
				bio.bi_size = PAGE_SIZE;
				i += 8;
			} else {
				bio_vec.bv_len = (mmc_hd->nr_sects - i) * 512;
				bio.bi_size = (mmc_hd->nr_sects - i) * 512;
				i = mmc_hd->nr_sects;
			}
			init_completion(&complete);
			bio.bi_private = &complete;
			bio.bi_end_io = mmc_bio_complete;
			submit_bio(WRITE, &bio);
			wait_for_completion(&complete);
		}
		blkdev_put(bdev, FMODE_WRITE);

		/* Then write the block back */
		devid = MKDEV(mmc_hd->major, mmc_hd->first_minor +
			mmc_hd->partno);
		bdev = open_by_devnum(devid, FMODE_WRITE);
		if (IS_ERR(bdev)) {
			printk(KERN_ERR "misc: open device failed with %ld\n",
				PTR_ERR(bdev));
			goto clean_up;
		}
		page = virt_to_page(bounce);
		i = 0;

		while (i < mmc_hd->nr_sects) {
			bio_init(&bio);
			bio.bi_io_vec = &bio_vec;
			bio_vec.bv_page = page;
			bio_vec.bv_offset = 0;
			bio.bi_vcnt = 1;
			bio.bi_idx = 0;
			bio.bi_bdev = bdev;
			bio.bi_sector = i;
			if (mmc_hd->nr_sects - i >= 8) {
				/* Copy the BCB block to buffer */
				memcpy(bounce, flashblock + (i * 512), PAGE_SIZE);

				bio_vec.bv_len = PAGE_SIZE;
				bio.bi_size = PAGE_SIZE;
				i += 8;
			} else {
				/* Copy the BCB block to buffer */
				memcpy(bounce, flashblock + (i * 512),
					(mmc_hd->nr_sects - i) * 512);

				bio_vec.bv_len = (mmc_hd->nr_sects - i) * 512;
				bio.bi_size = (mmc_hd->nr_sects - i) * 512;
				i = mmc_hd->nr_sects;
			}
			init_completion(&complete);
			bio.bi_private = &complete;
			bio.bi_end_io = mmc_bio_complete;
			submit_bio(WRITE, &bio);
			wait_for_completion(&complete);
		}
		blkdev_put(bdev, FMODE_WRITE);
	}

	if (!strncmp(v, "ap_only", 7)) {
		board_sysconfig(SYSCFG_RESETREASON_AP_ONLY_BOOT, SYSCFG_DISABLE);
	}

clean_up:

	if (flashblock != NULL)
		kfree(flashblock);

	return NOTIFY_DONE;
}
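reboot_notifier_callback() above treats the first bytes of the 'misc' partition as a bootloader_message. For reference, a sketch of the classic AOSP BCB layout it manipulates; field sizes follow the original bootable/recovery definition, and later Android versions extend the struct:

struct bootloader_message {
	char command[32];	/* e.g. "boot-recovery" */
	char status[32];	/* filled in by the bootloader after an update */
	char recovery[1024];	/* newline-separated arguments for recovery */
};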
Example #29
static void mmc_panic_notify_add(struct raw_hd_struct *hd,
			struct raw_mmc_panic_ops *panic_ops)
{
	dev_t devid;
	struct apanic_data *ctx = &drv_ctx;
	struct panic_header *hdr = ctx->bounce;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	ctx->mmchd = hd;
	ctx->mmc_panic_ops = panic_ops;

	devid = MKDEV(hd->major, hd->first_minor + hd->partno);
	bdev = blkdev_get_by_dev(devid, FMODE_READ, NULL);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "apanic: open device failed with %ld\n",
			PTR_ERR(bdev));
		goto out_err;
	}
	page = virt_to_page(ctx->bounce);

	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = 0;
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = mmc_bio_complete;
	submit_bio(READ, &bio);
	wait_for_completion(&complete);

	blkdev_put(bdev, FMODE_READ);
	printk(KERN_ERR "apanic: Bound to mmc block device '%s(%d:%d)'\n",
		apanic_dev_name, hd->major, hd->first_minor + hd->partno);

	if (hdr->magic != PANIC_MAGIC) {
		printk(KERN_INFO "apanic: No panic data available\n");
		return;
	}

	if (hdr->version != PHDR_VERSION) {
		printk(KERN_INFO "apanic: Version mismatch (%d != %d)\n",
		       hdr->version, PHDR_VERSION);
		return;
	}

	memcpy(&ctx->curr, hdr, sizeof(struct panic_header));

	printk(KERN_INFO "apanic: c(%u, %u) t(%u, %u) a(%u, %u)\n",
	       hdr->console_offset, hdr->console_length,
	       hdr->threads_offset, hdr->threads_length,
	       hdr->app_threads_offset, hdr->app_threads_length);

	if (hdr->console_length) {
		ctx->apanic_console = create_proc_entry("apanic_console",
						      S_IFREG | S_IRUGO, NULL);
		if (!ctx->apanic_console)
			printk(KERN_ERR "%s: failed creating procfile\n",
			       __func__);
		else {
			ctx->apanic_console->read_proc = apanic_proc_read_mmc;
			ctx->apanic_console->write_proc = apanic_proc_write;
			ctx->apanic_console->size = hdr->console_length;
			ctx->apanic_console->data = (void *) 1;
			has_apanic_mmc_dump = 1;
		}
	}

	if (hdr->threads_length) {
		ctx->apanic_threads = create_proc_entry("apanic_threads",
						       S_IFREG | S_IRUGO, NULL);
		if (!ctx->apanic_threads)
			printk(KERN_ERR "%s: failed creating procfile\n",
			       __func__);
		else {
			ctx->apanic_threads->read_proc = apanic_proc_read_mmc;
			ctx->apanic_threads->write_proc = apanic_proc_write;
			ctx->apanic_threads->size = hdr->threads_length;
			ctx->apanic_threads->data = (void *) 2;
		}
	}

	if (hdr->app_threads_length) {
		ctx->apanic_app_threads = create_proc_entry("apanic_app_threads",
						       S_IFREG | S_IRUGO, NULL);
		if (!ctx->apanic_app_threads)
			printk(KERN_ERR "%s: failed creating procfile\n",
			       __func__);
		else {
			ctx->apanic_app_threads->read_proc = apanic_proc_read_mmc;
			ctx->apanic_app_threads->write_proc = apanic_proc_write;
			ctx->apanic_app_threads->size = hdr->app_threads_length;
			ctx->apanic_app_threads->data = (void *) 3;
		}
	}

	return;
out_err:
	ctx->mmchd = NULL;
	return;
}
Example #30
static int apanic_proc_read_mmc(char *buffer, char **start, off_t offset,
				int count, int *peof, void *dat)
{
	int i, index = 0;
	int ret;
	int start_sect;
	int end_sect;
	size_t file_length;
	off_t file_offset;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	struct device *dev = part_to_dev(drv_ctx.hd);

	if (!ctx->hd || !ctx->mmc_panic_ops)
		return -EBUSY;

	if (!count)
		return 0;

	mutex_lock(&drv_mutex);

	switch ((int) dat) {
	case 1:		/* apanic_console */
		file_length = ctx->curr.console_length;
		file_offset = ctx->curr.console_offset;
		break;
	case 2:		/* apanic_threads */
		file_length = ctx->curr.threads_length;
		file_offset = ctx->curr.threads_offset;
		break;
	case 3:	/* apanic_app_threads */
		file_length = ctx->curr.app_threads_length;
		file_offset = ctx->curr.app_threads_offset;
		break;
	default:
		pr_err("Bad dat (%d)\n", (int) dat);
		mutex_unlock(&drv_mutex);
		return -EINVAL;
	}

	if ((offset + count) > file_length) {
		mutex_unlock(&drv_mutex);
		return 0;
	}

	bdev = blkdev_get_by_dev(dev->devt, FMODE_READ, NULL);
	if (IS_ERR(bdev)) {
		pr_err("apanic: open device failed with %ld\n", PTR_ERR(bdev));
		ret = PTR_ERR(bdev);
		goto out_err;
	}
	page = virt_to_page(ctx->bounce);

	start_sect = (file_offset + offset) / 512;
	end_sect = (file_offset + offset + count - 1) / 512;

	for (i = start_sect; i <= end_sect; i++) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		bio.bi_sector = i;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(READ, &bio);
		wait_for_completion(&complete);
		if (!test_bit(BIO_UPTODATE, &bio.bi_flags)) {
			ret = -EIO;
			goto out_err;
		}

		if ((i == start_sect)
		    && ((file_offset + offset) % 512 != 0)) {
			/* first sect, may be the only sect */
			memcpy(buffer, ctx->bounce + (file_offset + offset)
			       % 512, min((unsigned long) count,
					  (unsigned long)
					  (512 -
					   (file_offset + offset) % 512)));
			index += min((unsigned long) count, (unsigned long)
				     (512 - (file_offset + offset) % 512));
		} else if ((i == end_sect)
			   && ((file_offset + offset + count)
			       % 512 != 0)) {
			/* last sect */
			memcpy(buffer + index, ctx->bounce, (file_offset +
							     offset +
							     count) % 512);
		} else {
			/* middle sect */
			memcpy(buffer + index, ctx->bounce, 512);
			index += 512;
		}
	}
	blkdev_put(bdev, FMODE_READ);

	*start = (char *) count;

	if ((offset + count) == file_length)
		*peof = 1;

	mutex_unlock(&drv_mutex);
	return count;

out_err:
	mutex_unlock(&drv_mutex);
	return ret;
}