int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	refcount_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->proto = RTPROT_KERNEL;
	r->fr_net = ops->fro_net;
	r->uid_range = fib_kuid_range_unset;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
					  struct mpoa_client *client)
{
	in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL);

	if (entry == NULL) {
		pr_info("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n");
		return NULL;
	}

	dprintk("adding an ingress entry, ip = %pI4\n", &dst_ip);

	refcount_set(&entry->use, 1);
	dprintk("new_in_cache_entry: about to lock\n");
	write_lock_bh(&client->ingress_lock);
	entry->next = client->in_cache;
	entry->prev = NULL;
	if (client->in_cache != NULL)
		client->in_cache->prev = entry;
	client->in_cache = entry;

	memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
	entry->ctrl_info.in_dst_ip = dst_ip;
	do_gettimeofday(&(entry->tv));
	entry->retry_time = client->parameters.mpc_p4;
	entry->count = 1;
	entry->entry_state = INGRESS_INVALID;
	entry->ctrl_info.holding_time = HOLDING_TIME_DEFAULT;
	refcount_inc(&entry->use);
	write_unlock_bh(&client->ingress_lock);
	dprintk("new_in_cache_entry: unlocked\n");

	return entry;
}
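/*
 * Illustrative sketch, not part of the function above: the entry leaves
 * in_cache_add_entry() with use == 2 (one reference owned by the
 * client->in_cache list, one by the caller). A matching release helper
 * would free the entry only on the final reference drop; the body below
 * is an assumption modelled on the usual refcount_dec_and_test() pattern.
 */
static void in_cache_put(in_cache_entry *entry)
{
	if (refcount_dec_and_test(&entry->use)) {
		memset(entry, 0, sizeof(in_cache_entry));
		kfree(entry);
	}
}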
/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}
struct nsinfo *nsinfo__new(pid_t pid)
{
	struct nsinfo *nsi;

	if (pid == 0)
		return NULL;

	nsi = calloc(1, sizeof(*nsi));
	if (nsi != NULL) {
		nsi->pid = pid;
		nsi->tgid = pid;
		nsi->nstgid = pid;
		nsi->need_setns = false;
		/* Init may fail if the process exits while we're trying to look
		 * at its proc information. In that case, save the pid but
		 * don't try to enter the namespace.
		 */
		if (nsinfo__init(nsi) == -1)
			nsi->need_setns = false;

		refcount_set(&nsi->refcnt, 1);
	}

	return nsi;
}
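/*
 * Sketch of the get/put pair implied by nsi->refcnt; the destructor
 * nsinfo__delete() is assumed here to free mntns_path and the structure
 * itself. The last nsinfo__put() destroys the nsinfo.
 */
void nsinfo__get(struct nsinfo *nsi)
{
	refcount_inc(&nsi->refcnt);
}

void nsinfo__put(struct nsinfo *nsi)
{
	if (nsi && refcount_dec_and_test(&nsi->refcnt))
		nsinfo__delete(nsi);	/* assumed destructor */
}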
/** Create a new semaphore.
 * @param name		Optional name for the semaphore, for debugging purposes.
 * @param count		Initial count of the semaphore.
 * @param security	Security attributes for the ACL. If NULL, default
 *			attributes will be constructed which grant full access
 *			to the semaphore to the calling process' user.
 * @param rights	Access rights for the handle.
 * @param handlep	Where to store handle to the semaphore.
 * @return		Status code describing result of the operation. */
status_t kern_semaphore_create(const char *name, size_t count,
			       const object_security_t *security,
			       object_rights_t rights, handle_t *handlep)
{
	object_security_t ksecurity = { -1, -1, NULL };
	user_semaphore_t *sem;
	status_t ret;

	if(!handlep)
		return STATUS_INVALID_ARG;

	if(security) {
		ret = object_security_from_user(&ksecurity, security, true);
		if(ret != STATUS_SUCCESS)
			return ret;
	}

	/* Construct a default ACL if required. */
	if(!ksecurity.acl) {
		ksecurity.acl = kmalloc(sizeof(*ksecurity.acl), MM_WAIT);
		object_acl_init(ksecurity.acl);
		object_acl_add_entry(ksecurity.acl, ACL_ENTRY_USER, -1,
				     SEMAPHORE_RIGHT_USAGE);
	}

	sem = kmalloc(sizeof(user_semaphore_t), MM_WAIT);
	sem->id = id_allocator_alloc(&semaphore_id_allocator);
	if(sem->id < 0) {
		kfree(sem);
		object_security_destroy(&ksecurity);
		return STATUS_NO_SEMAPHORES;
	}

	if(name) {
		ret = strndup_from_user(name, SEMAPHORE_NAME_MAX, &sem->name);
		if(ret != STATUS_SUCCESS) {
			id_allocator_free(&semaphore_id_allocator, sem->id);
			kfree(sem);
			object_security_destroy(&ksecurity);
			return ret;
		}
	} else {
		sem->name = NULL;
	}

	object_init(&sem->obj, &semaphore_object_type, &ksecurity, NULL);
	object_security_destroy(&ksecurity);
	semaphore_init(&sem->sem, (sem->name) ? sem->name : "user_semaphore", count);
	refcount_set(&sem->count, 1);

	rwlock_write_lock(&semaphore_tree_lock);
	avl_tree_insert(&semaphore_tree, &sem->tree_link, sem->id, sem);
	rwlock_unlock(&semaphore_tree_lock);

	ret = object_handle_create(&sem->obj, NULL, rights, NULL, 0, NULL, NULL,
				   handlep);
	if(ret != STATUS_SUCCESS)
		user_semaphore_release(sem);

	return ret;
}
static struct nfulnl_instance *
instance_create(struct net *net, u_int16_t group_num,
		u32 portid, struct user_namespace *user_ns)
{
	struct nfulnl_instance *inst;
	struct nfnl_log_net *log = nfnl_log_pernet(net);
	int err;

	spin_lock_bh(&log->instances_lock);
	if (__instance_lookup(log, group_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (!try_module_get(THIS_MODULE)) {
		kfree(inst);
		err = -EAGAIN;
		goto out_unlock;
	}

	INIT_HLIST_NODE(&inst->hlist);
	spin_lock_init(&inst->lock);
	/* needs to be two, since we _put() after creation */
	refcount_set(&inst->use, 2);

	timer_setup(&inst->timer, nfulnl_timer, 0);

	inst->net = get_net(net);
	inst->peer_user_ns = user_ns;
	inst->peer_portid = portid;
	inst->group_num = group_num;

	inst->qthreshold = NFULNL_QTHRESH_DEFAULT;
	inst->flushtimeout = NFULNL_TIMEOUT_DEFAULT;
	inst->nlbufsiz = NFULNL_NLBUFSIZ_DEFAULT;
	inst->copy_mode = NFULNL_COPY_PACKET;
	inst->copy_range = NFULNL_COPY_RANGE_MAX;

	hlist_add_head_rcu(&inst->hlist,
			   &log->instance_table[instance_hashfn(group_num)]);

	spin_unlock_bh(&log->instances_lock);

	return inst;

out_unlock:
	spin_unlock_bh(&log->instances_lock);
	return ERR_PTR(err);
}
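/*
 * Why "refcount_set(&inst->use, 2)": one reference belongs to the hash
 * table, one to the creating caller, which drops its reference once done.
 * A release helper consistent with that scheme is sketched below; the RCU
 * callback name is an assumption, chosen because lookups traverse the
 * hlist under RCU.
 */
static void instance_put(struct nfulnl_instance *inst)
{
	if (inst && refcount_dec_and_test(&inst->use))
		call_rcu(&inst->rcu, nfulnl_instance_free_rcu);	/* assumed */
}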
/**
 * vsp1_dl_body_get - Obtain a body from a pool
 * @pool: The body pool
 *
 * Obtain a body from the pool without blocking.
 *
 * Returns a display list body or NULL if there are none available.
 */
struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool)
{
	struct vsp1_dl_body *dlb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);

	if (!list_empty(&pool->free)) {
		dlb = list_first_entry(&pool->free, struct vsp1_dl_body, free);
		list_del(&dlb->free);
		refcount_set(&dlb->refcnt, 1);
	}

	spin_unlock_irqrestore(&pool->lock, flags);

	return dlb;
}
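/*
 * Sketch of the complementary release path: since bodies are pooled, the
 * final reference drop returns the body to the pool's free list instead
 * of freeing it. The dlb->pool back-pointer and the exact locking are
 * assumptions.
 */
void vsp1_dl_body_put(struct vsp1_dl_body *dlb)
{
	unsigned long flags;

	if (!dlb || !refcount_dec_and_test(&dlb->refcnt))
		return;

	spin_lock_irqsave(&dlb->pool->lock, flags);
	list_add_tail(&dlb->free, &dlb->pool->free);
	spin_unlock_irqrestore(&dlb->pool->lock, flags);
}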
static void mdesc_handle_init(struct mdesc_handle *hp,
			      unsigned int handle_size,
			      void *base)
{
	BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));

	memset(hp, 0, handle_size);
	INIT_LIST_HEAD(&hp->list);
	hp->self_base = base;
	refcount_set(&hp->refcnt, 1);
	hp->handle_size = handle_size;
}
/*
 * Most part of f2fs_acl_clone, f2fs_acl_create_masq, f2fs_acl_create
 * are copied from posix_acl.c
 */
static struct posix_acl *f2fs_acl_clone(const struct posix_acl *acl,
					gfp_t flags)
{
	struct posix_acl *clone = NULL;

	if (acl) {
		int size = sizeof(struct posix_acl) + acl->a_count *
				sizeof(struct posix_acl_entry);
		clone = kmemdup(acl, size, flags);
		if (clone)
			refcount_set(&clone->a_refcount, 1);
	}
	return clone;
}
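/*
 * For context: the standard VFS release helper that pairs with a clone
 * like the one above frees the ACL on the final reference drop, deferred
 * through RCU. This mirrors posix_acl_release() from <linux/posix_acl.h>;
 * shown here as a sketch.
 */
static inline void posix_acl_release(struct posix_acl *acl)
{
	if (acl && refcount_dec_and_test(&acl->a_refcount))
		kfree_rcu(acl, a_rcu);
}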
/** Create a new session.
 * @return		Pointer to the created session with one reference held,
 *			or NULL if the session limit has been reached. */
session_t *session_create(void)
{
	session_t *session;

	session = kmalloc(sizeof(*session), MM_WAIT);
	refcount_set(&session->count, 1);
	session->id = id_allocator_alloc(&session_id_allocator);
	if(session->id < 0) {
		kfree(session);
		return NULL;
	}

	dprintf("session: created session %d\n", session->id);
	return session;
}
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (refcount_read(&nf_bridge->use) > 1) {
		struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

		if (tmp) {
			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
			refcount_set(&tmp->use, 1);
		}
		nf_bridge_put(nf_bridge);
		nf_bridge = tmp;
	}
	return nf_bridge;
}
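/*
 * Typical copy-on-write usage of the helper above (the surrounding
 * function here is hypothetical): unshare before writing, and treat a
 * NULL return as an allocation failure, since the skb's original
 * nf_bridge reference has already been dropped by that point.
 */
static int example_mangle_nf_bridge(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_unshare(skb);

	if (!nf_bridge)
		return -ENOMEM;

	/* This skb now holds the only reference, so writes are safe. */
	nf_bridge->pkt_otherhost = false;
	return 0;
}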
static struct ifacaddr6 *aca_alloc(struct fib6_info *f6i,
				   const struct in6_addr *addr)
{
	struct ifacaddr6 *aca;

	aca = kzalloc(sizeof(*aca), GFP_ATOMIC);
	if (!aca)
		return NULL;

	aca->aca_addr = *addr;
	fib6_info_hold(f6i);
	aca->aca_rt = f6i;
	aca->aca_users = 1;
	/* aca_tstamp should be updated upon changes */
	aca->aca_cstamp = aca->aca_tstamp = jiffies;
	refcount_set(&aca->aca_refcnt, 1);

	return aca;
}
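/*
 * Sketch of the release side implied by aca_refcnt: the final put drops
 * the fib6_info reference taken via fib6_info_hold() above and frees the
 * entry. Whether the free is deferred through RCU is version-dependent,
 * so take this body as an assumption.
 */
static void aca_put(struct ifacaddr6 *ac)
{
	if (refcount_dec_and_test(&ac->aca_refcnt)) {
		fib6_info_release(ac->aca_rt);
		kfree(ac);
	}
}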
static struct client_info *xio_create_client(struct xio_session *session,
					     struct xio_connection *conn)
{
	struct client_info *ci;

	ci = zalloc(sizeof(*ci));
	if (!ci)
		return NULL;

	ci->type = CLIENT_INFO_TYPE_XIO;
	ci->conn.session = session;
	ci->conn.conn = conn;
	INIT_LIST_NODE(&ci->conn.list);
	refcount_set(&ci->refcnt, 0);
	INIT_LIST_HEAD(&ci->done_reqs);

	return ci;
}
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db)
{
	struct hns_roce_user_db_page *page;
	int ret = 0;

	mutex_lock(&context->page_mutex);

	list_for_each_entry(page, &context->page_list, list)
		if (page->user_virt == (virt & PAGE_MASK))
			goto found;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&page->refcount, 1);
	page->user_virt = (virt & PAGE_MASK);
	page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
				 PAGE_SIZE, 0, 0);
	if (IS_ERR(page->umem)) {
		ret = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &context->page_list);

found:
	db->dma = sg_dma_address(page->umem->sg_head.sgl) +
		  (virt & ~PAGE_MASK);
	db->u.user_page = page;
	refcount_inc(&page->refcount);

out:
	mutex_unlock(&context->page_mutex);

	return ret;
}
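/*
 * Sketch of the reverse operation. The page is created with refcount 1
 * (the page list's baseline) and every mapped db takes one more reference,
 * so unmap drops the per-user reference and tears the page down once only
 * the baseline remains. refcount_dec_if_one() handles the final step
 * without underflowing; the helper body below is an assumption.
 */
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db)
{
	struct hns_roce_user_db_page *page = db->u.user_page;

	mutex_lock(&context->page_mutex);

	refcount_dec(&page->refcount);		/* drop this user's reference */
	if (refcount_dec_if_one(&page->refcount)) {
		list_del(&page->list);
		ib_umem_release(page->umem);
		kfree(page);
	}

	mutex_unlock(&context->page_mutex);
}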
static struct ifacaddr6 *aca_alloc(struct rt6_info *rt,
				   const struct in6_addr *addr)
{
	struct inet6_dev *idev = rt->rt6i_idev;
	struct ifacaddr6 *aca;

	aca = kzalloc(sizeof(*aca), GFP_ATOMIC);
	if (!aca)
		return NULL;

	aca->aca_addr = *addr;
	in6_dev_hold(idev);
	aca->aca_idev = idev;
	aca->aca_rt = rt;
	aca->aca_users = 1;
	/* aca_tstamp should be updated upon changes */
	aca->aca_cstamp = aca->aca_tstamp = jiffies;
	refcount_set(&aca->aca_refcnt, 1);

	return aca;
}
struct nsinfo *nsinfo__copy(struct nsinfo *nsi)
{
	struct nsinfo *nnsi;

	nnsi = calloc(1, sizeof(*nnsi));
	if (nnsi != NULL) {
		nnsi->pid = nsi->pid;
		nnsi->tgid = nsi->tgid;
		nnsi->nstgid = nsi->nstgid;
		nnsi->need_setns = nsi->need_setns;
		if (nsi->mntns_path) {
			nnsi->mntns_path = strdup(nsi->mntns_path);
			if (!nnsi->mntns_path) {
				free(nnsi);
				return NULL;
			}
		}
		refcount_set(&nnsi->refcnt, 1);
	}

	return nnsi;
}
static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
					  struct mpoa_client *client)
{
	eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL);

	if (entry == NULL) {
		pr_info("out of memory\n");
		return NULL;
	}

	dprintk("adding an egress entry, ip = %pI4, this should be our IP\n",
		&msg->content.eg_info.eg_dst_ip);

	refcount_set(&entry->use, 1);
	dprintk("new_eg_cache_entry: about to lock\n");
	write_lock_irq(&client->egress_lock);
	entry->next = client->eg_cache;
	entry->prev = NULL;
	if (client->eg_cache != NULL)
		client->eg_cache->prev = entry;
	client->eg_cache = entry;

	memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
	entry->ctrl_info = msg->content.eg_info;
	do_gettimeofday(&(entry->tv));
	entry->entry_state = EGRESS_RESOLVED;
	dprintk("new_eg_cache_entry cache_id %u\n",
		ntohl(entry->ctrl_info.cache_id));
	dprintk("mps_ip = %pI4\n", &entry->ctrl_info.mps_ip);
	refcount_inc(&entry->use);
	write_unlock_irq(&client->egress_lock);
	dprintk("new_eg_cache_entry: unlocked\n");

	return entry;
}
static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
{
	ax25_route *ax25_rt;
	ax25_dev *ax25_dev;
	int i;

	if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL)
		return -EINVAL;
	if (route->digi_count > AX25_MAX_DIGIS)
		return -EINVAL;

	write_lock_bh(&ax25_route_lock);

	ax25_rt = ax25_route_list;
	while (ax25_rt != NULL) {
		if (ax25cmp(&ax25_rt->callsign, &route->dest_addr) == 0 &&
		    ax25_rt->dev == ax25_dev->dev) {
			kfree(ax25_rt->digipeat);
			ax25_rt->digipeat = NULL;
			if (route->digi_count != 0) {
				if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
					write_unlock_bh(&ax25_route_lock);
					return -ENOMEM;
				}
				ax25_rt->digipeat->lastrepeat = -1;
				ax25_rt->digipeat->ndigi = route->digi_count;
				for (i = 0; i < route->digi_count; i++) {
					ax25_rt->digipeat->repeated[i] = 0;
					ax25_rt->digipeat->calls[i] = route->digi_addr[i];
				}
			}
			write_unlock_bh(&ax25_route_lock);
			return 0;
		}
		ax25_rt = ax25_rt->next;
	}

	if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) {
		write_unlock_bh(&ax25_route_lock);
		return -ENOMEM;
	}

	refcount_set(&ax25_rt->refcount, 1);
	ax25_rt->callsign = route->dest_addr;
	ax25_rt->dev = ax25_dev->dev;
	ax25_rt->digipeat = NULL;
	ax25_rt->ip_mode = ' ';
	if (route->digi_count != 0) {
		if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
			write_unlock_bh(&ax25_route_lock);
			kfree(ax25_rt);
			return -ENOMEM;
		}
		ax25_rt->digipeat->lastrepeat = -1;
		ax25_rt->digipeat->ndigi = route->digi_count;
		for (i = 0; i < route->digi_count; i++) {
			ax25_rt->digipeat->repeated[i] = 0;
			ax25_rt->digipeat->calls[i] = route->digi_addr[i];
		}
	}
	ax25_rt->next = ax25_route_list;
	ax25_route_list = ax25_rt;
	write_unlock_bh(&ax25_route_lock);

	return 0;
}
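/*
 * Sketch of the hold/put helpers implied by ax25_rt->refcount; the free
 * path must release the digipeater list as well as the route itself.
 * The names follow the AX.25 convention, but the bodies are assumptions.
 */
static inline void ax25_hold_route(ax25_route *ax25_rt)
{
	refcount_inc(&ax25_rt->refcount);
}

void ax25_put_route(ax25_route *ax25_rt)
{
	if (refcount_dec_and_test(&ax25_rt->refcount)) {
		kfree(ax25_rt->digipeat);
		kfree(ax25_rt);
	}
}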
/*
 * Initialise an AFS network namespace record.
 */
static int __net_init afs_net_init(struct afs_net *net)
{
	struct afs_sysnames *sysnames;
	int ret;

	net->live = true;
	generate_random_uuid((unsigned char *)&net->uuid);

	INIT_WORK(&net->charge_preallocation_work, afs_charge_preallocation);
	mutex_init(&net->socket_mutex);

	net->cells = RB_ROOT;
	seqlock_init(&net->cells_lock);
	INIT_WORK(&net->cells_manager, afs_manage_cells);
	timer_setup(&net->cells_timer, afs_cells_timer, 0);

	spin_lock_init(&net->proc_cells_lock);
	INIT_LIST_HEAD(&net->proc_cells);

	seqlock_init(&net->fs_lock);
	net->fs_servers = RB_ROOT;
	INIT_LIST_HEAD(&net->fs_updates);
	INIT_HLIST_HEAD(&net->fs_proc);

	INIT_HLIST_HEAD(&net->fs_addresses4);
	INIT_HLIST_HEAD(&net->fs_addresses6);
	seqlock_init(&net->fs_addr_lock);

	INIT_WORK(&net->fs_manager, afs_manage_servers);
	timer_setup(&net->fs_timer, afs_servers_timer, 0);

	ret = -ENOMEM;
	sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL);
	if (!sysnames)
		goto error_sysnames;
	sysnames->subs[0] = (char *)&afs_init_sysname;
	sysnames->nr = 1;
	refcount_set(&sysnames->usage, 1);
	net->sysnames = sysnames;
	rwlock_init(&net->sysnames_lock);

	/* Register the /proc stuff */
	ret = afs_proc_init(net);
	if (ret < 0)
		goto error_proc;

	/* Initialise the cell DB */
	ret = afs_cell_init(net, rootcell);
	if (ret < 0)
		goto error_cell_init;

	/* Create the RxRPC transport */
	ret = afs_open_socket(net);
	if (ret < 0)
		goto error_open_socket;

	return 0;

error_open_socket:
	net->live = false;
	afs_cell_purge(net);
	afs_purge_servers(net);
error_cell_init:
	net->live = false;
	afs_proc_cleanup(net);
error_proc:
	afs_put_sysnames(net->sysnames);
error_sysnames:
	net->live = false;
	return ret;
}
static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	int val, valbool;
	int retv = -ENOPROTOOPT;
	bool needs_rtnl = setsockopt_needs_rtnl(optname);

	if (!optval)
		val = 0;
	else {
		if (optlen >= sizeof(int)) {
			if (get_user(val, (int __user *) optval))
				return -EFAULT;
		} else
			val = 0;
	}

	valbool = (val != 0);

	if (ip6_mroute_opt(optname))
		return ip6_mroute_setsockopt(sk, optname, optval, optlen);

	if (needs_rtnl)
		rtnl_lock();
	lock_sock(sk);

	switch (optname) {

	case IPV6_ADDRFORM:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val == PF_INET) {
			struct ipv6_txoptions *opt;
			struct sk_buff *pktopt;

			if (sk->sk_type == SOCK_RAW)
				break;

			if (sk->sk_protocol == IPPROTO_UDP ||
			    sk->sk_protocol == IPPROTO_UDPLITE) {
				struct udp_sock *up = udp_sk(sk);

				if (up->pending == AF_INET6) {
					retv = -EBUSY;
					break;
				}
			} else if (sk->sk_protocol != IPPROTO_TCP)
				break;
			if (sk->sk_state != TCP_ESTABLISHED) {
				retv = -ENOTCONN;
				break;
			}

			if (ipv6_only_sock(sk) ||
			    !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
				retv = -EADDRNOTAVAIL;
				break;
			}

			fl6_free_socklist(sk);
			__ipv6_sock_mc_close(sk);

			/*
			 * Sock is moving from IPv6 to IPv4 (sk_prot), so
			 * remove it from the refcnt debug socks count in the
			 * original family...
			 */
			sk_refcnt_debug_dec(sk);

			if (sk->sk_protocol == IPPROTO_TCP) {
				struct inet_connection_sock *icsk = inet_csk(sk);

				local_bh_disable();
				sock_prot_inuse_add(net, sk->sk_prot, -1);
				sock_prot_inuse_add(net, &tcp_prot, 1);
				local_bh_enable();
				sk->sk_prot = &tcp_prot;
				icsk->icsk_af_ops = &ipv4_specific;
				sk->sk_socket->ops = &inet_stream_ops;
				sk->sk_family = PF_INET;
				tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
			} else {
				struct proto *prot = &udp_prot;

				if (sk->sk_protocol == IPPROTO_UDPLITE)
					prot = &udplite_prot;
				local_bh_disable();
				sock_prot_inuse_add(net, sk->sk_prot, -1);
				sock_prot_inuse_add(net, prot, 1);
				local_bh_enable();
				sk->sk_prot = prot;
				sk->sk_socket->ops = &inet_dgram_ops;
				sk->sk_family = PF_INET;
			}
			opt = xchg((__force struct ipv6_txoptions **)&np->opt,
				   NULL);
			if (opt) {
				atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
				txopt_put(opt);
			}
			pktopt = xchg(&np->pktoptions, NULL);
			kfree_skb(pktopt);

			/*
			 * ... and add it to the refcnt debug socks count
			 * in the new family. -acme
			 */
			sk_refcnt_debug_inc(sk);
			module_put(THIS_MODULE);
			retv = 0;
			break;
		}
		goto e_inval;

	case IPV6_V6ONLY:
		if (optlen < sizeof(int) ||
		    inet_sk(sk)->inet_num)
			goto e_inval;
		sk->sk_ipv6only = valbool;
		retv = 0;
		break;

	case IPV6_RECVPKTINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxinfo = valbool;
		retv = 0;
		break;

	case IPV6_2292PKTINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxoinfo = valbool;
		retv = 0;
		break;

	case IPV6_RECVHOPLIMIT:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxhlim = valbool;
		retv = 0;
		break;

	case IPV6_2292HOPLIMIT:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxohlim = valbool;
		retv = 0;
		break;

	case IPV6_RECVRTHDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.srcrt = valbool;
		retv = 0;
		break;

	case IPV6_2292RTHDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.osrcrt = valbool;
		retv = 0;
		break;

	case IPV6_RECVHOPOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.hopopts = valbool;
		retv = 0;
		break;

	case IPV6_2292HOPOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.ohopopts = valbool;
		retv = 0;
		break;

	case IPV6_RECVDSTOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.dstopts = valbool;
		retv = 0;
		break;

	case IPV6_2292DSTOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.odstopts = valbool;
		retv = 0;
		break;

	case IPV6_TCLASS:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < -1 || val > 0xff)
			goto e_inval;
		/* RFC 3542, 6.5: default traffic class of 0x0 */
		if (val == -1)
			val = 0;
		np->tclass = val;
		retv = 0;
		break;

	case IPV6_RECVTCLASS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxtclass = valbool;
		retv = 0;
		break;

	case IPV6_FLOWINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxflow = valbool;
		retv = 0;
		break;

	case IPV6_RECVPATHMTU:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxpmtu = valbool;
		retv = 0;
		break;

	case IPV6_TRANSPARENT:
		if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) &&
		    !ns_capable(net->user_ns, CAP_NET_RAW)) {
			retv = -EPERM;
			break;
		}
		if (optlen < sizeof(int))
			goto e_inval;
		/* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */
		inet_sk(sk)->transparent = valbool;
		retv = 0;
		break;

	case IPV6_FREEBIND:
		if (optlen < sizeof(int))
			goto e_inval;
		/* we also don't have a separate freebind bit for IPV6 */
		inet_sk(sk)->freebind = valbool;
		retv = 0;
		break;

	case IPV6_RECVORIGDSTADDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxorigdstaddr = valbool;
		retv = 0;
		break;

	case IPV6_HOPOPTS:
	case IPV6_RTHDRDSTOPTS:
	case IPV6_RTHDR:
	case IPV6_DSTOPTS:
	{
		struct ipv6_txoptions *opt;

		/* remove any sticky options header with a zero option
		 * length, per RFC3542.
		 */
		if (optlen == 0)
			optval = NULL;
		else if (!optval)
			goto e_inval;
		else if (optlen < sizeof(struct ipv6_opt_hdr) ||
			 optlen & 0x7 || optlen > 8 * 255)
			goto e_inval;

		/* hop-by-hop / destination options are privileged option */
		retv = -EPERM;
		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
			break;

		opt = rcu_dereference_protected(np->opt,
						lockdep_sock_is_held(sk));
		opt = ipv6_renew_options(sk, opt, optname,
					 (struct ipv6_opt_hdr __user *)optval,
					 optlen);
		if (IS_ERR(opt)) {
			retv = PTR_ERR(opt);
			break;
		}

		/* routing header option needs extra check */
		retv = -EINVAL;
		if (optname == IPV6_RTHDR && opt && opt->srcrt) {
			struct ipv6_rt_hdr *rthdr = opt->srcrt;

			switch (rthdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
			case IPV6_SRCRT_TYPE_2:
				if (rthdr->hdrlen != 2 ||
				    rthdr->segments_left != 1)
					goto sticky_done;
				break;
#endif
			case IPV6_SRCRT_TYPE_4:
			{
				struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)
							  opt->srcrt;

				if (!seg6_validate_srh(srh, optlen))
					goto sticky_done;
				break;
			}
			default:
				goto sticky_done;
			}
		}

		retv = 0;
		opt = ipv6_update_options(sk, opt);
sticky_done:
		if (opt) {
			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
			txopt_put(opt);
		}
		break;
	}

	case IPV6_PKTINFO:
	{
		struct in6_pktinfo pkt;

		if (optlen == 0)
			goto e_inval;
		else if (optlen < sizeof(struct in6_pktinfo) || !optval)
			goto e_inval;

		if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) {
			retv = -EFAULT;
			break;
		}
		if (sk->sk_bound_dev_if && pkt.ipi6_ifindex != sk->sk_bound_dev_if)
			goto e_inval;

		np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
		np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr;
		retv = 0;
		break;
	}

	case IPV6_2292PKTOPTIONS:
	{
		struct ipv6_txoptions *opt = NULL;
		struct msghdr msg;
		struct flowi6 fl6;
		struct sockcm_cookie sockc_junk;
		struct ipcm6_cookie ipc6;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.flowi6_mark = sk->sk_mark;

		if (optlen == 0)
			goto update;

		/* 1K is probably excessive
		 * 1K is surely not enough, 2K per standard header is 16K.
		 */
		retv = -EINVAL;
		if (optlen > 64*1024)
			break;

		opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
		retv = -ENOBUFS;
		if (!opt)
			break;

		memset(opt, 0, sizeof(*opt));
		refcount_set(&opt->refcnt, 1);
		opt->tot_len = sizeof(*opt) + optlen;
		retv = -EFAULT;
		if (copy_from_user(opt+1, optval, optlen))
			goto done;

		msg.msg_controllen = optlen;
		msg.msg_control = (void *)(opt+1);
		ipc6.opt = opt;

		retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6, &sockc_junk);
		if (retv)
			goto done;
update:
		retv = 0;
		opt = ipv6_update_options(sk, opt);
done:
		if (opt) {
			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
			txopt_put(opt);
		}
		break;
	}
	case IPV6_UNICAST_HOPS:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->hop_limit = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_HOPS:
		if (sk->sk_type == SOCK_STREAM)
			break;
		if (optlen < sizeof(int))
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
		retv = 0;
		break;

	case IPV6_MULTICAST_LOOP:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val != valbool)
			goto e_inval;
		np->mc_loop = valbool;
		retv = 0;
		break;

	case IPV6_UNICAST_IF:
	{
		struct net_device *dev = NULL;
		int ifindex;

		if (optlen != sizeof(int))
			goto e_inval;

		ifindex = (__force int)ntohl((__force __be32)val);
		if (ifindex == 0) {
			np->ucast_oif = 0;
			retv = 0;
			break;
		}

		dev = dev_get_by_index(net, ifindex);
		retv = -EADDRNOTAVAIL;
		if (!dev)
			break;
		dev_put(dev);

		retv = -EINVAL;
		if (sk->sk_bound_dev_if)
			break;

		np->ucast_oif = ifindex;
		retv = 0;
		break;
	}

	case IPV6_MULTICAST_IF:
		if (sk->sk_type == SOCK_STREAM)
			break;
		if (optlen < sizeof(int))
			goto e_inval;

		if (val) {
			struct net_device *dev;
			int midx;

			rcu_read_lock();

			dev = dev_get_by_index_rcu(net, val);
			if (!dev) {
				rcu_read_unlock();
				retv = -ENODEV;
				break;
			}
			midx = l3mdev_master_ifindex_rcu(dev);

			rcu_read_unlock();

			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != val &&
			    (!midx || midx != sk->sk_bound_dev_if))
				goto e_inval;
		}
		np->mcast_oif = val;
		retv = 0;
		break;

	case IPV6_ADD_MEMBERSHIP:
	case IPV6_DROP_MEMBERSHIP:
	{
		struct ipv6_mreq mreq;

		if (optlen < sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EPROTO;
		if (inet_sk(sk)->is_icsk)
			break;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_ADD_MEMBERSHIP)
			retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		else
			retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		break;
	}
	case IPV6_JOIN_ANYCAST:
	case IPV6_LEAVE_ANYCAST:
	{
		struct ipv6_mreq mreq;

		if (optlen < sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_JOIN_ANYCAST)
			retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		else
			retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in6 *psin6;

		if (optlen < sizeof(struct group_req))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(struct group_req)))
			break;
		if (greq.gr_group.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		psin6 = (struct sockaddr_in6 *)&greq.gr_group;
		if (optname == MCAST_JOIN_GROUP)
			retv = ipv6_sock_mc_join(sk, greq.gr_interface,
						 &psin6->sin6_addr);
		else
			retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
						 &psin6->sin6_addr);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		int omode, add;

		if (optlen < sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			retv = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET6 ||
		    greqs.gsr_source.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
			retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
						 &psin6->sin6_addr);
			/* prior join w/ different source is ok */
			if (retv && retv != -EADDRINUSE)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* MCAST_LEAVE_SOURCE_GROUP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		retv = ip6_mc_source(add, omode, sk, &greqs);
		break;
	}
	case MCAST_MSFILTER:
	{
		struct group_filter *gsf;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			retv = -ENOBUFS;
			break;
		}
		gsf = memdup_user(optval, optlen);
		if (IS_ERR(gsf)) {
			retv = PTR_ERR(gsf);
			break;
		}
		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffffU ||
		    gsf->gf_numsrc > sysctl_mld_max_msf) {
			kfree(gsf);
			retv = -ENOBUFS;
			break;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			kfree(gsf);
			retv = -EINVAL;
			break;
		}
		retv = ip6_mc_msfilter(sk, gsf);
		kfree(gsf);

		break;
	}
	case IPV6_ROUTER_ALERT:
		if (optlen < sizeof(int))
			goto e_inval;
		retv = ip6_ra_control(sk, val);
		break;
	case IPV6_MTU_DISCOVER:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < IPV6_PMTUDISC_DONT || val > IPV6_PMTUDISC_OMIT)
			goto e_inval;
		np->pmtudisc = val;
		retv = 0;
		break;
	case IPV6_MTU:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val && val < IPV6_MIN_MTU)
			goto e_inval;
		np->frag_size = val;
		retv = 0;
		break;
	case IPV6_RECVERR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->recverr = valbool;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		retv = 0;
		break;
	case IPV6_FLOWINFO_SEND:
		if (optlen < sizeof(int))
			goto e_inval;
		np->sndflow = valbool;
		retv = 0;
		break;
	case IPV6_FLOWLABEL_MGR:
		retv = ipv6_flowlabel_opt(sk, optval, optlen);
		break;
	case IPV6_IPSEC_POLICY:
	case IPV6_XFRM_POLICY:
		retv = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		retv = xfrm_user_policy(sk, optname, optval, optlen);
		break;

	case IPV6_ADDR_PREFERENCES:
	{
		unsigned int pref = 0;
		unsigned int prefmask = ~0;

		if (optlen < sizeof(int))
			goto e_inval;

		retv = -EINVAL;

		/* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */
		switch (val & (IPV6_PREFER_SRC_PUBLIC|
			       IPV6_PREFER_SRC_TMP|
			       IPV6_PREFER_SRC_PUBTMP_DEFAULT)) {
		case IPV6_PREFER_SRC_PUBLIC:
			pref |= IPV6_PREFER_SRC_PUBLIC;
			break;
		case IPV6_PREFER_SRC_TMP:
			pref |= IPV6_PREFER_SRC_TMP;
			break;
		case IPV6_PREFER_SRC_PUBTMP_DEFAULT:
			break;
		case 0:
			goto pref_skip_pubtmp;
		default:
			goto e_inval;
		}

		prefmask &= ~(IPV6_PREFER_SRC_PUBLIC|
			      IPV6_PREFER_SRC_TMP);
pref_skip_pubtmp:

		/* check HOME/COA conflicts */
		switch (val & (IPV6_PREFER_SRC_HOME|IPV6_PREFER_SRC_COA)) {
		case IPV6_PREFER_SRC_HOME:
			break;
		case IPV6_PREFER_SRC_COA:
			pref |= IPV6_PREFER_SRC_COA;
		case 0:
			goto pref_skip_coa;
		default:
			goto e_inval;
		}

		prefmask &= ~IPV6_PREFER_SRC_COA;
pref_skip_coa:

		/* check CGA/NONCGA conflicts */
		switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) {
		case IPV6_PREFER_SRC_CGA:
		case IPV6_PREFER_SRC_NONCGA:
		case 0:
			break;
		default:
			goto e_inval;
		}

		np->srcprefs = (np->srcprefs & prefmask) | pref;
		retv = 0;

		break;
	}
	case IPV6_MINHOPCOUNT:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < 0 || val > 255)
			goto e_inval;
		np->min_hopcount = val;
		retv = 0;
		break;
	case IPV6_DONTFRAG:
		np->dontfrag = valbool;
		retv = 0;
		break;
	case IPV6_AUTOFLOWLABEL:
		np->autoflowlabel = valbool;
		np->autoflowlabel_set = 1;
		retv = 0;
		break;
	case IPV6_RECVFRAGSIZE:
		np->rxopt.bits.recvfragsize = valbool;
		retv = 0;
		break;
	}

	release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();

	return retv;

e_inval:
	release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();
	return -EINVAL;
}
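/*
 * For the IPV6_2292PKTOPTIONS branch above: ipv6_txoptions carries its
 * own refcnt, and both the error paths and IPV6_ADDRFORM drop it through
 * txopt_put(). That helper mirrors the kernel's inline from <net/ipv6.h>;
 * shown here as a sketch.
 */
static inline void txopt_put(struct ipv6_txoptions *opt)
{
	if (opt && refcount_dec_and_test(&opt->refcnt))
		kfree_rcu(opt, rcu);
}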
/**
 * pvrdma_create_srq - create shared receive queue
 * @pd: protection domain
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: the ib_srq pointer on success, otherwise returns an errno.
 */
struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct pvrdma_srq *srq = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
	struct pvrdma_create_srq_resp srq_resp = {0};
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;

	if (!udata) {
		/* No support for kernel clients. */
		dev_warn(&dev->pdev->dev,
			 "no shared receive queue support for kernel client\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->srq_type != IB_SRQT_BASIC) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue type %d not supported\n",
			 init_attr->srq_type);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
	    init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue size invalid\n");
		return ERR_PTR(-EINVAL);
	}

	if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
		return ERR_PTR(-ENOMEM);

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = -ENOMEM;
		goto err_srq;
	}

	spin_lock_init(&srq->lock);
	refcount_set(&srq->refcnt, 1);
	init_completion(&srq->free);

	dev_dbg(&dev->pdev->dev,
		"create shared receive queue from user space\n");

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		ret = -EFAULT;
		goto err_srq;
	}

	srq->umem = ib_umem_get(pd->uobject->context,
				ucmd.buf_addr,
				ucmd.buf_size, 0, 0);
	if (IS_ERR(srq->umem)) {
		ret = PTR_ERR(srq->umem);
		goto err_srq;
	}

	srq->npages = ib_umem_page_count(srq->umem);

	if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in shared receive queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

	ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
	cmd->srq_type = init_attr->srq_type;
	cmd->nchunks = srq->npages;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->attrs.max_wr = init_attr->attr.max_wr;
	cmd->attrs.max_sge = init_attr->attr.max_sge;
	cmd->attrs.srq_limit = init_attr->attr.srq_limit;
	cmd->pdir_dma = srq->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create shared receive queue, error: %d\n",
			 ret);
		goto err_page_dir;
	}

	srq->srq_handle = resp->srqn;
	srq_resp.srqn = resp->srqn;
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Copy udata back. */
	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		pvrdma_destroy_srq(&srq->ibsrq);
		return ERR_PTR(-EINVAL);
	}

	return &srq->ibsrq;

err_page_dir:
	pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
	ib_umem_release(srq->umem);
err_srq:
	kfree(srq);
	atomic_dec(&dev->num_srqs);

	return ERR_PTR(ret);
}
/*
 * Build a server list from a VLDB record.
 */
struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
					      struct key *key,
					      struct afs_vldb_entry *vldb,
					      u8 type_mask)
{
	struct afs_server_list *slist;
	struct afs_server *server;
	int ret = -ENOMEM, nr_servers = 0, i, j;

	for (i = 0; i < vldb->nr_servers; i++)
		if (vldb->fs_mask[i] & type_mask)
			nr_servers++;

	slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL);
	if (!slist)
		goto error;

	refcount_set(&slist->usage, 1);
	rwlock_init(&slist->lock);

	/* Make sure a record exists for each server in the list. */
	for (i = 0; i < vldb->nr_servers; i++) {
		if (!(vldb->fs_mask[i] & type_mask))
			continue;

		server = afs_lookup_server(cell, key, &vldb->fs_server[i]);
		if (IS_ERR(server)) {
			ret = PTR_ERR(server);
			if (ret == -ENOENT || ret == -ENOMEDIUM)
				continue;
			goto error_2;
		}

		/* Insertion-sort by UUID */
		for (j = 0; j < slist->nr_servers; j++)
			if (memcmp(&slist->servers[j].server->uuid,
				   &server->uuid,
				   sizeof(server->uuid)) >= 0)
				break;
		if (j < slist->nr_servers) {
			if (slist->servers[j].server == server) {
				afs_put_server(cell->net, server);
				continue;
			}

			memmove(slist->servers + j + 1,
				slist->servers + j,
				(slist->nr_servers - j) * sizeof(struct afs_server_entry));
		}

		slist->servers[j].server = server;
		slist->nr_servers++;
	}

	if (slist->nr_servers == 0) {
		ret = -EDESTADDRREQ;
		goto error_2;
	}

	return slist;

error_2:
	afs_put_serverlist(cell->net, slist);
error:
	return ERR_PTR(ret);
}
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct dst_entry *dst,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_metrics(child);
	tcp_call_bpf(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
	tcp_init_buffer_space(child);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}
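/*
 * Why rsk_refcnt starts at 2: one reference is held by the child socket
 * via tp->fastopen_rsk and one by the accept/listener path; each side
 * drops its reference with reqsk_put(). The helper below mirrors the
 * kernel's inline from <net/request_sock.h>; shown here as a sketch.
 */
static inline void reqsk_put(struct request_sock *req)
{
	if (refcount_dec_and_test(&req->rsk_refcnt))
		reqsk_free(req);
}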