/*
 * Look up the aoedev with the given system minor on the devlist,
 * allocating a new one (with bufcnt buffers) when no match exists.
 * Records the receiving interface and source MAC, and (re)initializes
 * the address fields when the device is not currently up.
 * Returns the device, or NULL when allocation fails.
 */
struct aoedev *
aoedev_set(ulong sysminor, unsigned char *addr, struct net_device *ifp, ulong bufcnt)
{
	struct aoedev *dev;
	ulong flags;

	spin_lock_irqsave(&devlist_lock, flags);

	/* linear search of the singly linked device list */
	dev = devlist;
	while (dev != NULL && dev->sysminor != sysminor)
		dev = dev->next;

	if (dev == NULL) {
		dev = aoedev_newdev(bufcnt);
		if (dev == NULL) {
			spin_unlock_irqrestore(&devlist_lock, flags);
			printk(KERN_INFO "aoe: aoedev_set: aoedev_newdev failure.\n");
			return NULL;
		}
	}

	/* if newdev, (dev->flags & DEVFL_UP) == 0 for below */
	spin_unlock_irqrestore(&devlist_lock, flags);

	/* per-device updates happen under the device lock, not the list lock */
	spin_lock_irqsave(&dev->lock, flags);
	dev->ifp = ifp;
	memcpy(dev->addr, addr, sizeof dev->addr);
	if (!(dev->flags & DEVFL_UP)) {
		aoedev_downdev(dev);	/* flushes outstanding frames */
		dev->sysminor = sysminor;
		dev->aoemajor = AOEMAJOR(sysminor);
		dev->aoeminor = AOEMINOR(sysminor);
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return dev;
}
/*
 * Per-device retransmit timer callback.  @vp is the struct aoedev
 * pointer cast to ulong (classic pre-timer_setup() calling convention).
 * Retransmits frames that have waited longer than the timeout, takes
 * the device down when a frame has waited more than aoe_deadsecs
 * seconds total, then re-arms itself — unless DEVFL_TKILL is set,
 * which is how teardown paths stop this self-rearming timer.
 */
static void rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct frame *f, *e;
	struct sk_buff *sl;	/* snapshot of the send queue, transmitted after unlock */
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		/* device is being torn down: return without re-arming */
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	/* scan the fixed frame table for in-flight frames that have timed out */
	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
			n = f->waited += timeout;	/* accumulate total wait, in jiffies */
			n /= HZ;			/* ... converted to seconds */
			if (n > aoe_deadsecs) {
				/* waited too long for response */
				aoedev_downdev(d);
				break;
			}
			rexmit(d, f);
		}
	}

	/* somebody wanted work started while we held the lock elsewhere */
	if (d->flags & DEVFL_KICKME) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	/* detach the pending send queue; it is transmitted after unlock */
	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;
	if (sl) {
		/* retransmissions happened: back off by doubling the RTT
		 * average, clamped to MAXTIMER */
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	/* re-arm ourselves for the next tick */
	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	/* transmit outside the lock; aoenet_xmit tolerates a NULL list */
	aoenet_xmit(sl);
}
/*
 * Module-exit teardown: take every device on devlist down and free it.
 * Runs at module unload, so no new devices can appear concurrently.
 */
void aoedev_exit(void)
{
	struct aoedev *d;
	ulong flags;

	/* Let any scheduled work (e.g. deferred gendisk allocation)
	 * finish before devices are freed; otherwise queued work could
	 * dereference a freed aoedev.  Matches the other teardown paths
	 * in this driver, which flush before freeing. */
	flush_scheduled_work();

	while ((d = devlist)) {
		devlist = d->next;

		spin_lock_irqsave(&d->lock, flags);
		aoedev_downdev(d);
		/* rexmit_timer() checks DEVFL_TKILL and will not re-arm
		 * itself once it is set, making del_timer_sync() final */
		d->flags |= DEVFL_TKILL;
		spin_unlock_irqrestore(&d->lock, flags);

		del_timer_sync(&d->timer);
		aoedev_freedev(d);
	}
}
/*
 * Handle a userland "flush" request: unlink and free devices that are
 * down (or, with the "all" argument, every idle device).  Devices that
 * are open, mid-allocation, or resizing are skipped.  Works in two
 * passes: unlink under devlist_lock onto a private list, then free
 * outside all locks, since aoedev_freedev may sleep.
 * Returns 0 on success or -EFAULT if the user buffer can't be read.
 */
int aoedev_flush(const char __user *str, size_t cnt)
{
	ulong flags;
	struct aoedev *d, **dd;
	struct aoedev *rmd = NULL;	/* private list of devices to free */
	char buf[16];
	int all = 0;

	if (cnt >= 3) {
		if (cnt > sizeof buf)
			cnt = sizeof buf;
		if (copy_from_user(buf, str, cnt))
			return -EFAULT;
		/* buf may be unterminated; only the first 3 bytes are examined */
		all = !strncmp(buf, "all", 3);
	}

	/* make sure no deferred work is still referencing devices */
	flush_scheduled_work();

	spin_lock_irqsave(&devlist_lock, flags);
	dd = &devlist;	/* pointer to the link we may rewrite, enabling in-place unlink */
	while ((d = *dd)) {
		spin_lock(&d->lock);	/* nested inside devlist_lock; irqs already off */
		if ((!all && (d->flags & DEVFL_UP))
		|| (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		|| d->nopen) {
			/* busy or in use: keep it, advance the link pointer */
			spin_unlock(&d->lock);
			dd = &d->next;
			continue;
		}
		*dd = d->next;	/* unlink from devlist */
		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;	/* stop rexmit_timer from re-arming */
		spin_unlock(&d->lock);
		d->next = rmd;	/* push onto the removal list */
		rmd = d;
	}
	spin_unlock_irqrestore(&devlist_lock, flags);

	/* second pass, outside all locks: stop timers and free */
	while ((d = rmd)) {
		rmd = d->next;
		del_timer_sync(&d->timer);
		aoedev_freedev(d);	/* must be able to sleep */
	}
	return 0;
}
void aoedev_exit(void) { struct aoedev *d; ulong flags; flush_scheduled_work(); while ((d = devlist)) { devlist = d->next; spin_lock_irqsave(&d->lock, flags); aoedev_downdev(d); spin_unlock_irqrestore(&d->lock, flags); del_timer_sync(&d->timer); aoedev_freedev(d); } }
/*
 * Flush devices from devlist in three passes:
 *   1. under devlist_lock, mark eligible devices dead (DEVFL_TKILL)
 *      and take them down — no sleeping here;
 *   2. for each TKILL device not yet freeing, drop the locks and call
 *      freedev() (which might sleep), restarting the scan each time
 *      since the list may have changed while unlocked;
 *   3. unlink and kfree devices that have reached DEVFL_FREED.
 * @exiting nonzero means unconditional teardown (module exit);
 * otherwise @str/@cnt select "all", a specific device (via user_req —
 * presumably matching a user-supplied name; semantics live elsewhere),
 * or the default of flushing only devices that are down and idle.
 * Returns 0 on success or -EFAULT if the user buffer can't be read.
 */
static int flush(const char __user *str, size_t cnt, int exiting)
{
	ulong flags;
	struct aoedev *d, **dd;
	char buf[16];
	int all = 0;
	int specified = 0;	/* flush a specific device */
	unsigned int skipflags;

	/* devices in these states must not be flushed by default */
	skipflags = DEVFL_GDALLOC | DEVFL_NEWSIZE | DEVFL_TKILL;

	if (!exiting && cnt >= 3) {
		if (cnt > sizeof buf)
			cnt = sizeof buf;
		if (copy_from_user(buf, str, cnt))
			return -EFAULT;
		/* buf may be unterminated; only the first 3 bytes are examined */
		all = !strncmp(buf, "all", 3);
		if (!all)
			specified = 1;
	}

	/* make sure no deferred work is still referencing devices */
	flush_scheduled_work();

	/* pass one: without sleeping, do aoedev_downdev */
	spin_lock_irqsave(&devlist_lock, flags);
	for (d = devlist; d; d = d->next) {
		spin_lock(&d->lock);	/* nested inside devlist_lock */
		if (exiting) {
			/* unconditionally take each device down */
		} else if (specified) {
			if (!user_req(buf, cnt, d))
				goto cont;
		} else if ((!all && (d->flags & DEVFL_UP))
		|| d->flags & skipflags
		|| d->nopen
		|| d->ref)
			goto cont;

		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;	/* also stops rexmit from re-arming */
cont:
		spin_unlock(&d->lock);
	}
	spin_unlock_irqrestore(&devlist_lock, flags);

	/* pass two: call freedev, which might sleep,
	 * for aoedevs marked with DEVFL_TKILL
	 */
restart:
	spin_lock_irqsave(&devlist_lock, flags);
	for (d = devlist; d; d = d->next) {
		spin_lock(&d->lock);
		if (d->flags & DEVFL_TKILL
		&& !(d->flags & DEVFL_FREEING)) {
			/* drop both locks before the (possibly sleeping)
			 * freedev call, then rescan from the head since the
			 * list may have changed while unlocked */
			spin_unlock(&d->lock);
			spin_unlock_irqrestore(&devlist_lock, flags);
			freedev(d);
			goto restart;
		}
		spin_unlock(&d->lock);
	}

	/* pass three: remove aoedevs marked with DEVFL_FREED */
	for (dd = &devlist, d = *dd; d; d = *dd) {
		struct aoedev *doomed = NULL;

		spin_lock(&d->lock);
		if (d->flags & DEVFL_FREED) {
			*dd = d->next;	/* unlink via the back-pointer */
			doomed = d;
		} else {
			dd = &d->next;
		}
		spin_unlock(&d->lock);
		/* kfree(NULL) is a no-op, so the unconditional kfree of
		 * doomed is intentional; only the targets array needs the
		 * NULL guard before dereferencing */
		if (doomed)
			kfree(doomed->targets);
		kfree(doomed);
	}
	spin_unlock_irqrestore(&devlist_lock, flags);

	return 0;
}