Example no. 1
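This appears to be rexmit_timer() from the Linux ATA-over-Ethernet (aoe) block driver. Invoked periodically from the device timer, it retransmits any outstanding frame that has waited at least ~150% of the round-trip moving average, downs the device once a frame has waited longer than aoe_deadsecs seconds, and flushes the device's pending send queue after dropping the lock.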
static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct frame *f, *e;
	struct sk_buff *sl;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
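	/* Walk every frame slot; any in-flight frame older than the
	 * timeout is resent, unless it has already waited more than
	 * aoe_deadsecs seconds, in which case the device is downed. */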
	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) { /* waited too long for response */
				aoedev_downdev(d);
				break;
			}
			rexmit(d, f);
		}
	}
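	/* another path set DEVFL_KICKME to ask for the transmit work
	 * to be re-run from here */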
	if (d->flags & DEVFL_KICKME) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;
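	/* packets are queued for (re)transmission: back off by doubling
	 * the round-trip average, capped at MAXTIMER */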
	if (sl) {
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

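	/* re-arm this timer to fire again after TIMERTICK jiffies */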
	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
Example no. 2
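Two revisions of aoeblk_make_request(), the bio submission entry point of the same aoe driver, are shown. The first (older) revision allocates a struct buf for the incoming bio, queues it on the device's buffer queue, and kicks aoecmd_work() to build and transmit the corresponding AoE frames.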
static int
aoeblk_make_request(request_queue_t *q, struct bio *bio)
{
	struct aoedev *d;
	struct buf *buf;
	struct sk_buff *sl;
	ulong flags;

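	/* replace pages the device cannot address with bounce-buffer pages */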
	blk_queue_bounce(q, &bio);

	d = bio->bi_bdev->bd_disk->private_data;
	buf = mempool_alloc(d->bufpool, GFP_NOIO);
	if (buf == NULL) {
		printk(KERN_INFO "aoe: buf allocation failure\n");
		bio_endio(bio, bio->bi_size, -ENOMEM);
		return 0;
	}
	memset(buf, 0, sizeof(*buf));
	INIT_LIST_HEAD(&buf->bufs);
	buf->start_time = jiffies;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	buf->bv = &bio->bi_io_vec[bio->bi_idx];
	WARN_ON(buf->bv->bv_len == 0);
	buf->bv_resid = buf->bv->bv_len;
	buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;

	spin_lock_irqsave(&d->lock, flags);

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_INFO "aoe: device %ld.%ld is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irqrestore(&d->lock, flags);
		mempool_free(buf, d->bufpool);
		bio_endio(bio, bio->bi_size, -ENXIO);
		return 0;
	}

	list_add_tail(&buf->bufs, &d->bufq);

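	/* turn queued bufs into AoE frames on the device's send queue */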
	aoecmd_work(d);
	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(sl);

	return 0;
}
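The second, later revision of the same function adds defensive NULL checks on the bio, the disk's private_data, and the bio's I/O vector; it uses the newer two-argument bio_endio() signature, a rate-limited log message, and a struct sk_buff_head that is spliced to a private queue for transmission outside the lock.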
static int
aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct buf *buf;
	ulong flags;

	blk_queue_bounce(q, &bio);

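	/* defensive checks: a NULL here indicates a bug in the caller */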
	if (bio == NULL) {
		printk(KERN_ERR "aoe: bio is NULL\n");
		BUG();
		return 0;
	}
	d = bio->bi_bdev->bd_disk->private_data;
	if (d == NULL) {
		printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	} else if (bio->bi_io_vec == NULL) {
		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	}
	buf = mempool_alloc(d->bufpool, GFP_NOIO);
	if (buf == NULL) {
		printk(KERN_INFO "aoe: buf allocation failure\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	memset(buf, 0, sizeof(*buf));
	INIT_LIST_HEAD(&buf->bufs);
	buf->stime = jiffies;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	buf->bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = buf->bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
	buf->bv_off = buf->bv->bv_offset;

	spin_lock_irqsave(&d->lock, flags);

	if ((d->flags & DEVFL_UP) == 0) {
		pr_info_ratelimited("aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irqrestore(&d->lock, flags);
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -ENXIO);
		return 0;
	}

	list_add_tail(&buf->bufs, &d->bufq);

	aoecmd_work(d);
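	/* splice the pending skbs onto a private queue so they can be
	 * transmitted after the lock is dropped */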
	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);

	return 0;
}