static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        int i;
        int cpu;

        cpu = smp_processor_id();
        for (i = 0; i < flow_hash_size; i++) {
                struct flow_cache_entry *fle;

                fle = flow_table(cpu)[i];
                for (; fle; fle = fle->next) {
                        unsigned genid = atomic_read(&flow_cache_genid);

                        /* Entries whose generation still matches the global
                         * counter are valid; skip them. */
                        if (!fle->object || fle->genid == genid)
                                continue;

                        /* Stale entry: drop the cached object's reference. */
                        fle->object = NULL;
                        atomic_dec(fle->object_ref);
                }
        }

        /* The last CPU to finish wakes up the flushing thread. */
        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}
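/*
 * Hedged sketch, not part of the original excerpt: the tasklet above
 * dereferences info->cpuleft and info->completion, so the flush descriptor
 * it is handed presumably looks like this. The caller sets cpuleft to the
 * number of CPUs it schedules the tasklet on and sleeps on the completion;
 * the last CPU to finish flushing signals it.
 */
struct flow_flush_info {
        atomic_t                cpuleft;
        struct completion       completion;
};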
static void __devinit flow_cache_cpu_prepare(int cpu)
{
        struct tasklet_struct *tasklet;
        unsigned long order;

        for (order = 0;
             (PAGE_SIZE << order) <
                     (sizeof(struct flow_cache_entry *)*flow_hash_size);
             order++)
                /* NOTHING */;

        flow_table(cpu) = (struct flow_cache_entry **)
                __get_free_pages(GFP_KERNEL, order);
        if (!flow_table(cpu))
                panic("NET: failed to allocate flow cache order %lu\n", order);

        memset(flow_table(cpu), 0, PAGE_SIZE << order);

        flow_hash_rnd_recalc(cpu) = 1;
        flow_count(cpu) = 0;

        tasklet = flow_flush_tasklet(cpu);
        tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
}
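/*
 * Hedged sketch of the per-CPU accessors used above and below
 * (flow_table(), flow_count(), flow_hash_rnd_recalc(), flow_flush_tasklet()).
 * Their definitions are not part of this excerpt; declarations along these
 * lines, built on DEFINE_PER_CPU, would be consistent with how they are used:
 */
struct flow_percpu_info {
        int     hash_rnd_recalc;
        u32     hash_rnd;
        int     count;
};

static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);

#define flow_table(cpu)                 (per_cpu(flow_tables, cpu))
#define flow_hash_rnd(cpu)              (per_cpu(flow_hash_info, cpu).hash_rnd)
#define flow_count(cpu)                 (per_cpu(flow_hash_info, cpu).count)
#define flow_hash_rnd_recalc(cpu)       (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
#define flow_flush_tasklet(cpu)         (&per_cpu(flow_flush_tasklets, cpu))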
static void __flow_cache_shrink(int cpu, int shrink_to)
{
        struct flow_cache_entry *fle, **flp;
        int i;

        for (i = 0; i < flow_hash_size; i++) {
                int k = 0;

                /* Keep the first shrink_to entries of each hash chain. */
                flp = &flow_table(cpu)[i];
                while ((fle = *flp) != NULL && k < shrink_to) {
                        k++;
                        flp = &fle->next;
                }
                /* Unlink and free everything past that point. */
                while ((fle = *flp) != NULL) {
                        *flp = fle->next;
                        if (fle->object)
                                atomic_dec(fle->object_ref);
                        kmem_cache_free(flow_cachep, fle);
                        flow_count(cpu)--;
                }
        }
}
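/*
 * Hedged sketch: flow_cache_lookup() below calls flow_cache_shrink(cpu)
 * once flow_count(cpu) exceeds the high water mark flow_hwm. A thin wrapper
 * like this, trimming each hash chain toward a low water mark (flow_lwm is
 * an assumed tunable mirroring flow_hwm), would match that usage:
 */
static void flow_cache_shrink(int cpu)
{
        int shrink_to = flow_lwm / flow_hash_size;

        __flow_cache_shrink(cpu, shrink_to);
}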
void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
                        flow_resolve_t resolver)
{
        struct flow_cache_entry *fle, **head;
        unsigned int hash;
        int cpu;

        local_bh_disable();
        cpu = smp_processor_id();

        fle = NULL;
        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!flow_table(cpu))
                goto nocache;

        if (flow_hash_rnd_recalc(cpu))
                flow_new_hash_rnd(cpu);
        hash = flow_hash_code(key, cpu);

        head = &flow_table(cpu)[hash];
        for (fle = *head; fle; fle = fle->next) {
                if (fle->family == family &&
                    fle->dir == dir &&
                    flow_key_compare(key, &fle->key) == 0) {
                        if (fle->genid == atomic_read(&flow_cache_genid)) {
                                /* Hit on a current-generation entry:
                                 * take a reference for the caller. */
                                void *ret = fle->object;

                                if (ret)
                                        atomic_inc(fle->object_ref);
                                local_bh_enable();

                                return ret;
                        }
                        /* Stale entry: fall through and re-resolve it. */
                        break;
                }
        }

        if (!fle) {
                if (flow_count(cpu) > flow_hwm)
                        flow_cache_shrink(cpu);

                fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
                if (fle) {
                        fle->next = *head;
                        *head = fle;
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, sizeof(*key));
                        fle->object = NULL;
                        flow_count(cpu)++;
                }
        }

nocache:
        {
                void *obj;
                atomic_t *obj_ref;

                resolver(key, family, dir, &obj, &obj_ref);

                if (fle) {
                        fle->genid = atomic_read(&flow_cache_genid);

                        /* Drop any stale object before caching the new one. */
                        if (fle->object)
                                atomic_dec(fle->object_ref);

                        fle->object = obj;
                        fle->object_ref = obj_ref;
                        if (obj)
                                atomic_inc(fle->object_ref);
                }
                local_bh_enable();

                return obj;
        }
}
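/*
 * Hedged sketch, not from the original excerpt: a resolver matching the
 * flow_resolve_t callback that flow_cache_lookup() invokes on a miss or on
 * a stale entry. All names here (struct sample_policy, sample_policy_find,
 * sample_resolver) are hypothetical stand-ins; a real resolver (e.g. the
 * IPsec policy lookup) follows the same contract: store the resolved object
 * in *objp and its reference counter in *obj_refp, with one reference held
 * for the caller -- flow_cache_lookup() takes an extra one for the cache
 * entry itself.
 */
struct sample_policy {
        atomic_t        refcnt;
        /* ... policy data ... */
};

static struct sample_policy *sample_policy_find(struct flowi *key,
                                                u16 family, u8 dir);

static void sample_resolver(struct flowi *key, u16 family, u8 dir,
                            void **objp, atomic_t **obj_refp)
{
        struct sample_policy *pol = sample_policy_find(key, family, dir);

        if (pol) {
                /* sample_policy_find() is assumed to return with a
                 * reference already held on pol->refcnt. */
                *objp = pol;
                *obj_refp = &pol->refcnt;
        } else {
                *objp = NULL;
                *obj_refp = NULL;
        }
}

/* A caller would then resolve through the cache with something like:
 *      pol = flow_cache_lookup(&fl, family, dir, sample_resolver);
 */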
void usr_cmd_tables(void)
{
        POF_COMMAND_PRINT_HEAD("tables");
        flow_table();
}