static struct fblock *fb_udp_ctor(char *name) { int ret = 0; struct fblock *fb; struct fb_udp_priv *fb_priv; fb = alloc_fblock(GFP_ATOMIC); if (!fb) return NULL; fb_priv = kzalloc(sizeof(*fb_priv), GFP_ATOMIC); if (!fb_priv) goto err; seqlock_init(&fb_priv->lock); fb_priv->port[0] = IDP_UNKNOWN; fb_priv->port[1] = IDP_UNKNOWN; ret = init_fblock(fb, name, fb_priv); if (ret) goto err2; fb->netfb_rx = fb_udp_netrx; fb->event_rx = fb_udp_event; ret = register_fblock_namespace(fb); if (ret) goto err3; __module_get(THIS_MODULE); return fb; err3: cleanup_fblock_ctor(fb); err2: kfree(fb_priv); err: kfree_fblock(fb); return NULL; }
static struct fblock *fb_counter_ctor(char *name) { int ret = 0; unsigned int cpu; struct fblock *fb; struct fb_counter_priv __percpu *fb_priv; struct proc_dir_entry *fb_proc; fb = alloc_fblock(GFP_ATOMIC); if (!fb) return NULL; fb_priv = alloc_percpu(struct fb_counter_priv); if (!fb_priv) goto err; get_online_cpus(); for_each_online_cpu(cpu) { struct fb_counter_priv *fb_priv_cpu; fb_priv_cpu = per_cpu_ptr(fb_priv, cpu); seqlock_init(&fb_priv_cpu->lock); fb_priv_cpu->port[0] = IDP_UNKNOWN; fb_priv_cpu->port[1] = IDP_UNKNOWN; fb_priv_cpu->packets = 0; fb_priv_cpu->bytes = 0; } put_online_cpus(); ret = init_fblock(fb, name, fb_priv); if (ret) goto err2; fb->netfb_rx = fb_counter_netrx; fb->event_rx = fb_counter_event; fb_proc = proc_create_data(fb->name, 0444, fblock_proc_dir, &fb_counter_proc_fops, (void *)(long) fb); if (!fb_proc) goto err3; ret = register_fblock_namespace(fb); if (ret) goto err4; __module_get(THIS_MODULE); return fb; err4: remove_proc_entry(fb->name, fblock_proc_dir); err3: cleanup_fblock_ctor(fb); err2: free_percpu(fb_priv); err: kfree_fblock(fb); return NULL; }
static struct fblock *fb_huf_ctor(char *name) { int ret = 0; struct fblock *fb; struct fb_huf_priv *fb_priv; struct proc_dir_entry *fb_proc; Node *tree; fb = alloc_fblock(GFP_ATOMIC); if (!fb) return NULL; fb_priv = kzalloc(sizeof(*fb_priv), GFP_ATOMIC); if (!fb_priv) goto err; seqlock_init(&fb_priv->lock); rwlock_init(&fb_priv->klock); fb_priv->port[0] = IDP_UNKNOWN; fb_priv->port[1] = IDP_UNKNOWN; ret = init_fblock(fb, name, fb_priv); if (ret) goto err2; fb->netfb_rx = fb_huf_netrx; fb->event_rx = fb_huf_event; // fb->linearize = fb_aes_linearize; // fb->delinearize = fb_aes_delinearize; fb_proc = proc_create_data(fb->name, 0444, fblock_proc_dir, &fb_huf_proc_fops, (void *)(long) fb); if (!fb_proc) goto err3; ret = register_fblock_namespace(fb); if (ret) goto err4; __module_get(THIS_MODULE); buildHuffmanTree(&tree); fillTable(tree, 0); invertCodes(); return fb; err4: remove_proc_entry(fb->name, fblock_proc_dir); err3: cleanup_fblock_ctor(fb); err2: kfree(fb_priv); err: kfree_fblock(fb); return NULL; }
static struct fblock *fb_counter_ctor(char *name) { int ret = 0; struct fblock *fb; struct fb_counter_priv *fb_priv; struct proc_dir_entry *fb_proc; fb = alloc_fblock(GFP_ATOMIC); if (!fb) return NULL; fb_priv = kzalloc(sizeof(*fb_priv), GFP_ATOMIC); if (!fb_priv) goto err; seqlock_init(&fb_priv->lock); fb_priv->port[0] = IDP_UNKNOWN; fb_priv->port[1] = IDP_UNKNOWN; fb_priv->packets = 0; fb_priv->bytes = 0; ret = init_fblock(fb, name, fb_priv); if (ret) goto err2; fb->netfb_rx = fb_counter_netrx; fb->event_rx = fb_counter_event; fb_proc = proc_create_data(fb->name, 0444, fblock_proc_dir, &fb_counter_proc_fops, (void *)(long) fb); if (!fb_proc) goto err3; ret = register_fblock_namespace(fb); if (ret) goto err4; __module_get(THIS_MODULE); return fb; err4: remove_proc_entry(fb->name, fblock_proc_dir); err3: cleanup_fblock_ctor(fb); err2: kfree(fb_priv); err: kfree_fblock(fb); return NULL; }
/*
 * Constructor for the CRR receive-side functional block.
 *
 * Acquires, in order: the fblock, the per-CPU private area, and five
 * heap-allocated pieces of state SHARED across all CPUs (every per-CPU
 * slot stores pointers to the same objects): the rx bitstream word, the
 * rx window number, the rx rwlock, the reassembly skb queue, and the
 * expected sequence number. On any failure the goto chain releases
 * exactly the resources acquired so far, in reverse order of
 * acquisition (err1a -> err1 -> err0 -> err__ -> err_ -> err).
 * Takes a module reference on success; returns NULL on failure.
 */
static struct fblock *fb_crr_rx_ctor(char *name)
{
	int ret = 0;
	unsigned int cpu, *tmp_rx_bitstream;
	unsigned char *tmp_expected_seq_nr, *tmp_rx_win_nr;
	struct sk_buff_head *tmp_list;
	struct fblock *fb;
	struct fb_crr_rx_priv __percpu *fb_priv;
	rwlock_t *tmp_rx_lock;

	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;
	fb_priv = alloc_percpu(struct fb_crr_rx_priv);
	if (!fb_priv)
		goto err;
	/* Shared state, allocated once and referenced from every CPU slot. */
	if (unlikely((tmp_rx_bitstream = kzalloc(sizeof(unsigned int), GFP_ATOMIC)) == NULL))
		goto err_;
	if (unlikely((tmp_rx_win_nr = kzalloc(sizeof(unsigned char), GFP_ATOMIC)) == NULL))
		goto err__;
	if (unlikely((tmp_rx_lock = kzalloc(sizeof(rwlock_t), GFP_ATOMIC)) == NULL))
		goto err0;
	if (unlikely((tmp_list = kzalloc(sizeof(struct sk_buff_head), GFP_ATOMIC)) == NULL))
		goto err1;
	if (unlikely((tmp_expected_seq_nr = kzalloc(sizeof(unsigned char), GFP_ATOMIC)) == NULL))
		goto err1a;
	rwlock_init(tmp_rx_lock);
	*tmp_rx_bitstream = 0;
	*tmp_rx_win_nr = 0;
	/* Sequence numbering evidently starts at 1, not 0. */
	*tmp_expected_seq_nr = 1;
	skb_queue_head_init(tmp_list);
	/* Hold off CPU hotplug while pointing each CPU's slot at the
	 * shared state above. */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_crr_rx_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		seqlock_init(&fb_priv_cpu->lock);
		//rwlock_init(&fb_priv_cpu->rx_lock);
		fb_priv_cpu->rx_lock = tmp_rx_lock;
		fb_priv_cpu->port[0] = IDP_UNKNOWN;
		fb_priv_cpu->port[1] = IDP_UNKNOWN;
		fb_priv_cpu->rx_seq_nr = tmp_expected_seq_nr;
		fb_priv_cpu->list = tmp_list;
		fb_priv_cpu->rx_bitstream = tmp_rx_bitstream;
		fb_priv_cpu->rx_win_nr = tmp_rx_win_nr;
	}
	put_online_cpus();
	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;
	fb->netfb_rx = fb_crr_rx_netrx;
	fb->event_rx = fb_crr_rx_event;
	ret = register_fblock_namespace(fb);
	if (ret)
		goto err3;
	__module_get(THIS_MODULE);
	printk(KERN_ERR "[CRR_RX] Initialization passed!\n");
	return fb;
	/* Unwind: each label frees the resource acquired just before the
	 * jump to it, then falls through to the earlier labels. */
err3:
	cleanup_fblock_ctor(fb);
err2:
	kfree(tmp_expected_seq_nr);
err1a:
	kfree(tmp_list);
err1:
	kfree(tmp_rx_lock);
err0:
	kfree(tmp_rx_win_nr);
err__:
	kfree(tmp_rx_bitstream);
err_:
	free_percpu(fb_priv);
err:
	kfree_fblock(fb);
	printk(KERN_ERR 
"[CRR_RX] Initialization failed!\n");
	return NULL;
}