/*
 * Fold the pending idle load-delta for the current sample window into a
 * local value, consuming it from the per-index accumulator.
 *
 * Returns the folded delta, or 0 when no idle delta was recorded.
 */
static long calc_load_fold_idle(void)
{
	int idx = calc_load_read_idx();

	/* Only pay for the atomic exchange when a delta is pending. */
	if (!atomic_long_read(&calc_load_idle[idx]))
		return 0;

	return atomic_long_xchg(&calc_load_idle[idx], 0);
}
/*
 * Tear down group @gid and return it to an unused state.
 *
 * The BPF filter, the computation tree and its context are detached with
 * atomic exchanges first, then we sleep for Q_GRACE_PERIOD so that any
 * concurrent reader still holding the old pointers can drain before the
 * memory is released.  Must be called from user context (it sleeps).
 */
static void __pfq_group_free(int gid)
{
	struct pfq_group * g = pfq_get_group(gid);
	struct sk_filter *filter;
	struct pfq_computation_tree *old_comp;
	void *old_ctx;

	if (!g) {
		pr_devel("[PFQ] get_group: invalid group id %d!\n", gid);
		return;
	}

	/* remove this gid from demux matrix */
	pfq_devmap_update(map_reset, Q_ANY_DEVICE, Q_ANY_QUEUE, gid);

	/* mark the group as unowned/undefined before dropping its state */
	g->pid = 0;
	g->owner = -1;
	g->policy = Q_POLICY_GROUP_UNDEFINED;

	/* detach pointers atomically: new readers can no longer see them */
	filter = (struct sk_filter *)atomic_long_xchg(&g->bp_filter, 0L);
	old_comp = (struct pfq_computation_tree *)atomic_long_xchg(&g->comp, 0L);
	old_ctx = (void *)atomic_long_xchg(&g->comp_ctx, 0L);

	msleep(Q_GRACE_PERIOD); /* sleeping is possible here: user-context */

	/* call fini on old computation */
	if (old_comp)
		pfq_computation_fini(old_comp);

	/* kfree(NULL) is a no-op, so no guards needed here */
	kfree(old_comp);
	kfree(old_ctx);

	if (filter)
		pfq_free_sk_filter(filter);

	g->vlan_filt = false;

	pr_devel("[PFQ] group id:%d destroyed.\n", gid);
}
static void __pfq_group_dtor(int gid) { struct pfq_group * that = &pfq_groups[gid]; void *context[Q_FUN_MAX]; struct sk_filter *filter; int i; /* remove this gid from demux matrix */ pfq_devmap_update(map_reset, Q_ANY_DEVICE, Q_ANY_QUEUE, gid); that->pid = 0; that->policy = Q_GROUP_UNDEFINED; for(i = 0; i < Q_FUN_MAX; i++) { atomic_long_set(&pfq_groups[gid].fun_ctx[i].function, 0L); context[i] = (void *)atomic_long_xchg(&pfq_groups[gid].fun_ctx[i].context, 0L); } filter = (struct sk_filter *)atomic_long_xchg(&pfq_groups[gid].filter, 0L); msleep(Q_GRACE_PERIOD); /* sleeping is possible here: user-context */ for(i = 0; i < Q_FUN_MAX; i++) { kfree(context[i]); } pfq_free_sk_filter(filter); that->vlan_filt = false; pr_devel("[PFQ] group id:%d destroyed.\n", gid); }
/*
 * read the cache state
 */
static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
				      size_t buflen, loff_t *pos)
{
	struct cachefiles_cache *cache = file->private_data;
	unsigned long long b_released;
	unsigned f_released;
	char buffer[256];
	int n;

	//_enter(",,%zu,", buflen);

	/* nothing to report until the cache has been brought online */
	if (!test_bit(CACHEFILES_READY, &cache->flags))
		return 0;

	/* check how much space the cache has */
	cachefiles_has_space(cache, 0, 0);

	/* summarise */
	/* consume the released-object counters atomically (reset to 0) */
	f_released = atomic_xchg(&cache->f_released, 0);
	b_released = atomic_long_xchg(&cache->b_released, 0);
	clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);

	/* format the state summary; buffer is sized to hold the worst case */
	n = snprintf(buffer, sizeof(buffer),
		     "cull=%c"
		     " frun=%llx"
		     " fcull=%llx"
		     " fstop=%llx"
		     " brun=%llx"
		     " bcull=%llx"
		     " bstop=%llx"
		     " freleased=%x"
		     " breleased=%llx",
		     test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
		     (unsigned long long) cache->frun,
		     (unsigned long long) cache->fcull,
		     (unsigned long long) cache->fstop,
		     (unsigned long long) cache->brun,
		     (unsigned long long) cache->bcull,
		     (unsigned long long) cache->bstop,
		     f_released,
		     b_released);

	/* n is promoted to size_t here; the caller's buffer must fit it all */
	if (n > buflen)
		return -EMSGSIZE;

	if (copy_to_user(_buffer, buffer, n) != 0)
		return -EFAULT;

	return n;
}
/*
 * Leave the alert state: bump the alert counter and fold the elapsed
 * alert duration (in msecs) into the running total.  Does nothing when
 * the alert flag was not set.
 */
static void mr_alrt_leave(void)
{
	unsigned long then;
	unsigned long delta_ms;

	/* clear the flag; bail out if we were not in an alert */
	if (!atomic_xchg(&alrt_onoff, 0))
		return;

	then = atomic_long_xchg(&alrt_start, 0);
	atomic_inc(&alrt_count);

	if (jiffies == then)
		/* entered and left within one tick: account half a tick */
		delta_ms = jiffies_to_msecs(1) / 2;
	else
		delta_ms = jiffies_to_msecs(jiffies - then);

	atomic_long_add(delta_ms, &alrt_time);
}