/*
 * Requests larger than 1/2 the cache size will be bypassed and go
 * directly to the disk.  XXX tune this.
 */
int
bcache_strategy(void *devdata, int unit, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
{
    static int			bcache_unit = -1;
    struct bcache_devdata	*dd = (struct bcache_devdata *)devdata;

    bcache_ops++;

    if (bcache_unit != unit) {
	bcache_flush();
	bcache_unit = unit;
    }

    /* bypass large requests, or when the cache is inactive */
    if ((bcache_data == NULL) || ((size * 2 / bcache_blksize) > bcache_nblks)) {
	DEBUG("bypass %d from %d", size / bcache_blksize, blk);
	bcache_bypasses++;
	return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
    }

    switch (rw) {
    case F_READ:
	return read_strategy(devdata, unit, rw, blk, size, buf, rsize);
    case F_WRITE:
	return write_strategy(devdata, unit, rw, blk, size, buf, rsize);
    }
    return -1;
}
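/*
 * Illustrative sketch only, not part of the original source: one way a
 * caller might route a device's reads through bcache_strategy().  The
 * bcache_devdata layout shown here is inferred from the
 * dd->dv_strategy(dd->dv_devdata, ...) delegation above, and
 * raw_disk_strategy() is a hypothetical underlying driver routine; both
 * are assumptions made for illustration in a libstand-like environment.
 */
struct bcache_devdata {
	int	(*dv_strategy)(void *devdata, int rw, daddr_t blk,
		    size_t size, char *buf, size_t *rsize);
	void	*dv_devdata;		/* opaque cookie for the raw device */
};

static int
cached_read(void *raw_cookie, int unit, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
{
	struct bcache_devdata dd;

	dd.dv_strategy = raw_disk_strategy;	/* hypothetical raw driver */
	dd.dv_devdata = raw_cookie;

	/* Small reads are served from the cache; oversized ones bypass it. */
	return (bcache_strategy(&dd, unit, F_READ, blk, size, buf, rsize));
}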
ssize_t res_counter_write(struct res_counter *counter, int member,
		const char __user *userbuf, size_t nbytes, loff_t *pos,
		int (*write_strategy)(char *st_buf, unsigned long long *val))
{
	int ret;
	char *buf, *end;
	unsigned long flags;
	unsigned long long tmp, *val;

	/* Copy the user-supplied string into a NUL-terminated kernel buffer. */
	buf = kmalloc(nbytes + 1, GFP_KERNEL);
	ret = -ENOMEM;
	if (buf == NULL)
		goto out;

	buf[nbytes] = '\0';
	ret = -EFAULT;
	if (copy_from_user(buf, userbuf, nbytes))
		goto out_free;

	ret = -EINVAL;

	strstrip(buf);
	/* Parse via the caller's strategy if given, else as a plain decimal. */
	if (write_strategy) {
		if (write_strategy(buf, &tmp))
			goto out_free;
	} else {
		tmp = simple_strtoull(buf, &end, 10);
		if (*end != '\0')
			goto out_free;
	}
	/* Store the parsed value into the selected counter member. */
	spin_lock_irqsave(&counter->lock, flags);
	val = res_counter_member(counter, member);
	*val = tmp;
	spin_unlock_irqrestore(&counter->lock, flags);
	ret = nbytes;
out_free:
	kfree(buf);
out:
	return ret;
}
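/*
 * Illustrative sketch only, not part of the original source: a hypothetical
 * write_strategy callback.  res_counter_write() only requires that the
 * callback parse the string into *val and return 0 on success or non-zero
 * on failure (the caller then reports -EINVAL).  Treating "-1" as
 * "unlimited" is an assumption made purely for illustration.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>

static int example_write_strategy(char *st_buf, unsigned long long *val)
{
	char *end;

	/* Interpret "-1" as "no limit". */
	if (strcmp(st_buf, "-1") == 0) {
		*val = ULLONG_MAX;
		return 0;
	}

	*val = simple_strtoull(st_buf, &end, 10);
	if (*end != '\0')
		return -EINVAL;
	return 0;
}

/*
 * A caller could then pass the callback in place of the default decimal
 * parser, e.g.:
 *
 *	res_counter_write(counter, member, userbuf, nbytes, pos,
 *			  example_write_strategy);
 */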