// NF_QUEUE operations
response_t create_queue(int q_num, q_data_t **q_data, thread_data_t *data)
{
    response_t r;
    int cq_idx;

    if (get_queue_idx(q_num, q_data) == -1 &&
        (cq_idx = get_free_idx(q_data)) >= 0) {
        q_data[cq_idx]->h = nfq_open();
        if (!q_data[cq_idx]->h)
            syslog(LOG_ERR, "[%d] error during nfq_open()\n\r", data->idx);

        if (nfq_bind_pf(q_data[cq_idx]->h, AF_INET) < 0)
            syslog(LOG_ERR, "[%d] error during nfq_bind_pf()\n\r", data->idx);

        q_data[cq_idx]->qh = nfq_create_queue(q_data[cq_idx]->h, q_num, &cb, NULL);
        syslog(LOG_NOTICE, "[%d] q_num: %d\n\r", data->idx, q_num);

        if (!q_data[cq_idx]->qh) {
            r.cs = erl_mk_atom("error");
            r.rsp = erl_mk_estring("failed to create queue",
                                   strlen("failed to create queue"));
        } else {
            q_data[cq_idx]->q_num = q_num;
            r.cs = erl_mk_atom("ok");
            r.rsp = erl_mk_atom("ok");
        }
    } else {
        r.cs = erl_mk_atom("error");
        r.rsp = erl_mk_estring("queue in use", strlen("queue in use"));
    }

    return r;
}
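/*
 * The packet callback `cb` passed to nfq_create_queue() above is not shown
 * here.  The following is a minimal, hypothetical sketch of what such a
 * callback and its receive loop could look like using the stock
 * libnetfilter_queue API (nfq_fd/nfq_handle_packet/nfq_set_verdict); it is
 * an illustration, not this project's actual implementation.
 */
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netfilter.h>                    /* NF_ACCEPT */
#include <libnetfilter_queue/libnetfilter_queue.h>

static int example_cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
                      struct nfq_data *nfa, void *arg)
{
    struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
    uint32_t id = ph ? ntohl(ph->packet_id) : 0;

    /* accept every packet; a real callback would inspect the payload first */
    return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
}

/* drain packets for a queue that create_queue() has already set up */
static void example_loop(struct nfq_handle *h)
{
    char buf[4096];
    int fd = nfq_fd(h), rv;

    while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
        nfq_handle_packet(h, buf, rv);
}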
/*
 * Set a given TLS descriptor:
 */
int do_set_thread_area(struct task_struct *p, int idx,
                       struct user_desc __user *u_info,
                       int can_allocate)
{
    struct user_desc info;

    if (copy_from_user(&info, u_info, sizeof(info)))
        return -EFAULT;

    if (idx == -1)
        idx = info.entry_number;

    /*
     * index -1 means the kernel should try to find and
     * allocate an empty descriptor:
     */
    if (idx == -1 && can_allocate) {
        idx = get_free_idx();
        if (idx < 0)
            return idx;
        if (put_user(idx, &u_info->entry_number))
            return -EFAULT;
    }

    if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
        return -EINVAL;

    set_tls_desc(p, idx, &info, 1);

    return 0;
}
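/*
 * The handler above is the kernel side of the set_thread_area(2) syscall.
 * A minimal user-space sketch (x86, illustrative only) of the allocation
 * convention the comment describes: passing entry_number == -1 asks the
 * kernel to pick a free GDT TLS slot and write the chosen index back into
 * the user_desc.  Names below are examples, not part of the code above.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>                    /* struct user_desc */

static int alloc_tls_slot(void *base, unsigned int limit)
{
    struct user_desc desc;

    memset(&desc, 0, sizeof(desc));
    desc.entry_number   = -1;           /* let the kernel choose a slot */
    desc.base_addr      = (unsigned int)(unsigned long)base; /* 32-bit base only */
    desc.limit          = limit;
    desc.seg_32bit      = 1;
    desc.limit_in_pages = 1;
    desc.useable        = 1;

    if (syscall(SYS_set_thread_area, &desc) != 0)
        return -1;

    /* the kernel stored the allocated index back in entry_number */
    return desc.entry_number;
}

/* usage: int slot = alloc_tls_slot(tls_area, sizeof(tls_area) - 1); */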
/*
 * Set a given TLS descriptor:
 * When you want addresses > 32bit use arch_prctl()
 */
int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
{
    struct user_desc info;
    struct n_desc_struct *desc;
    int cpu, idx;

    if (copy_from_user(&info, u_info, sizeof(info)))
        return -EFAULT;

    idx = info.entry_number;

    /*
     * index -1 means the kernel should try to find and
     * allocate an empty descriptor:
     */
    if (idx == -1) {
        idx = get_free_idx();
        if (idx < 0)
            return idx;
        if (put_user(idx, &u_info->entry_number))
            return -EFAULT;
    }

    if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
        return -EINVAL;

    desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;

    /*
     * We must not get preempted while modifying the TLS.
     */
    cpu = get_cpu();

    if (LDT_empty(&info)) {
        desc->a = 0;
        desc->b = 0;
    } else {
        desc->a = LDT_entry_a(&info);
        desc->b = LDT_entry_b(&info);
    }
    if (t == &current->thread)
        load_TLS(t, cpu);

    put_cpu();

    return 0;
}
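/*
 * The comment above points at arch_prctl() for bases that do not fit in
 * 32 bits.  Illustrative x86_64 user-space sketch (not part of this file):
 * ARCH_GET_FS reads the current FS base, ARCH_SET_GS installs a full
 * 64-bit GS base.  FS is normally owned by the C library's TLS, so only
 * GS is touched here.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>                  /* ARCH_SET_GS, ARCH_GET_FS */

static unsigned long example_block[64]; /* hypothetical per-thread data */

int main(void)
{
    unsigned long fs_base = 0;

    if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fs_base) == 0)
        printf("fs base: %#lx\n", fs_base);

    /* point GS at a full 64-bit address, which set_thread_area() cannot express */
    if (syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)example_block) != 0)
        perror("arch_prctl(ARCH_SET_GS)");

    return 0;
}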
/*
 * Set a given TLS descriptor:
 */
int do_set_thread_area(struct task_struct *p, int idx,
                       struct user_desc __user *u_info,
                       int can_allocate)
{
    struct user_desc info;

    if (copy_from_user(&info, u_info, sizeof(info)))
        return -EFAULT;

    if (!tls_desc_okay(&info))
        return -EINVAL;

    if (idx == -1)
        idx = info.entry_number;

    /*
     * index -1 means the kernel should try to find and
     * allocate an empty descriptor:
     */
    if (idx == -1 && can_allocate) {
        idx = get_free_idx();
        if (idx < 0)
            return idx;
        if (put_user(idx, &u_info->entry_number))
            return -EFAULT;
    }

    if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
        return -EINVAL;

#ifdef CONFIG_PAX_SEGMEXEC
    if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) &&
        (info.contents & MODIFY_LDT_CONTENTS_CODE))
        return -EINVAL;
#endif

    set_tls_desc(p, idx, &info, 1);

    return 0;
}
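/*
 * tls_desc_okay() is called above but not defined in this snippet.  A
 * hypothetical sketch of the kind of validation such a helper performs
 * (the name and exact rules here are assumptions, not the kernel's code):
 * treat an all-clear descriptor as "empty this slot", reject 16-bit
 * segments, and allow only data segments in the TLS area.
 */
#include <stdbool.h>
#include <asm/ldt.h>

static bool tls_desc_okay_sketch(const struct user_desc *info)
{
    /* an all-zero payload (ignoring entry_number) means "clear the slot" */
    if (info->base_addr == 0 && info->limit == 0 && info->contents == 0 &&
        info->read_exec_only == 0 && info->seg_32bit == 0 &&
        info->limit_in_pages == 0 && info->seg_not_present == 0 &&
        info->useable == 0)
        return true;

    /* 16-bit segments are not accepted in the GDT TLS slots */
    if (!info->seg_32bit)
        return false;

    /* contents 0/1 are data/stack; 2 (MODIFY_LDT_CONTENTS_CODE) is rejected */
    if (info->contents > 1)
        return false;

    return true;
}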
/*
 * Set a given TLS descriptor:
 */
int do_set_thread_area(struct task_struct *p, int idx,
                       struct user_desc __user *u_info,
                       int can_allocate)
{
    struct user_desc info;
    unsigned short __maybe_unused sel, modified_sel;

    if (copy_from_user(&info, u_info, sizeof(info)))
        return -EFAULT;

    if (!tls_desc_okay(&info))
        return -EINVAL;

    if (idx == -1)
        idx = info.entry_number;

    /*
     * index -1 means the kernel should try to find and
     * allocate an empty descriptor:
     */
    if (idx == -1 && can_allocate) {
        idx = get_free_idx();
        if (idx < 0)
            return idx;
        if (put_user(idx, &u_info->entry_number))
            return -EFAULT;
    }

    if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
        return -EINVAL;

    set_tls_desc(p, idx, &info, 1);

    /*
     * If DS, ES, FS, or GS points to the modified segment, forcibly
     * refresh it. Only needed on x86_64 because x86_32 reloads them
     * on return to user mode.
     */
    modified_sel = (idx << 3) | 3;

    if (p == current) {
#ifdef CONFIG_X86_64
        savesegment(ds, sel);
        if (sel == modified_sel)
            loadsegment(ds, sel);

        savesegment(es, sel);
        if (sel == modified_sel)
            loadsegment(es, sel);

        savesegment(fs, sel);
        if (sel == modified_sel)
            loadsegment(fs, sel);

        savesegment(gs, sel);
        if (sel == modified_sel)
            load_gs_index(sel);
#endif

#ifdef CONFIG_X86_32_LAZY_GS
        savesegment(gs, sel);
        if (sel == modified_sel)
            loadsegment(gs, sel);
#endif
    } else {
#ifdef CONFIG_X86_64
        if (p->thread.fsindex == modified_sel)
            p->thread.fsbase = info.base_addr;

        if (p->thread.gsindex == modified_sel)
            p->thread.gsbase = info.base_addr;
#endif
    }

    return 0;
}
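/*
 * modified_sel above is the user-mode selector that refers to GDT entry
 * `idx`: bits 3..15 hold the descriptor index, bit 2 clear selects the GDT
 * rather than the LDT, and bits 0..1 are the requested privilege level
 * (3 for user mode).  Illustrative user-space sketch (x86, GCC inline asm
 * assumed) that rebuilds the same value and compares it with the selector
 * currently loaded in %gs, the user-space analogue of savesegment():
 */
#include <stdio.h>

static unsigned short gdt_user_selector(unsigned int idx)
{
    return (unsigned short)((idx << 3) | 3);    /* index, TI=0, RPL=3 */
}

int main(void)
{
    unsigned short gs_sel;

    /* read the live %gs selector */
    __asm__ volatile("mov %%gs, %0" : "=r"(gs_sel));

    /* 12 is just an example GDT index, not necessarily a TLS slot */
    printf("gs selector: %#hx, selector for GDT index 12: %#hx\n",
           gs_sel, gdt_user_selector(12));

    return 0;
}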