/*
 * __f2fs_set_acl - store a POSIX ACL for @inode as an extended attribute
 * @inode: inode whose ACL is being set
 * @type:  ACL_TYPE_ACCESS or ACL_TYPE_DEFAULT
 * @acl:   ACL to store, or NULL to remove the existing one
 * @ipage: in-memory inode page to update, or NULL to let f2fs_setxattr
 *         locate it
 *
 * Returns 0 on success or a negative errno.
 */
static int __f2fs_set_acl(struct inode *inode, int type,
			  struct posix_acl *acl, struct page *ipage)
{
	int name_index;
	void *value = NULL;
	size_t size = 0;
	int error;
	umode_t mode = inode->i_mode;

	switch (type) {
	case ACL_TYPE_ACCESS:
		name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
		/* Setting an access ACL may rewrite the mode bits. */
		if (acl && !ipage) {
			error = posix_acl_update_mode(inode, &mode, &acl);
			if (error)
				return error;
			/* Record the pending mode until the xattr lands
			 * (cleared via FI_ACL_MODE below). */
			set_acl_inode(inode, mode);
		}
		break;

	case ACL_TYPE_DEFAULT:
		name_index = F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT;
		/* Default ACLs are only valid on directories; removing a
		 * non-existent one is a silent success. */
		if (!S_ISDIR(inode->i_mode))
			return acl ? -EACCES : 0;
		break;

	default:
		return -EINVAL;
	}

	if (acl) {
		/* Serialize the ACL into the on-disk xattr format. */
		value = f2fs_acl_to_disk(F2FS_I_SB(inode), acl, &size);
		if (IS_ERR(value)) {
			clear_inode_flag(inode, FI_ACL_MODE);
			return PTR_ERR(value);
		}
	}

	/* NULL value / zero size removes the xattr. */
	error = f2fs_setxattr(inode, name_index, "", value, size, ipage, 0);

	kvfree(value);
	if (!error)
		set_cached_acl(inode, type, acl);

	clear_inode_flag(inode, FI_ACL_MODE);
	return error;
}
/*
 * rxe_qp_init_resp - initialize the responder side of a soft-RoCE QP
 * @rxe:     owning device
 * @qp:      QP being created
 * @init:    creation attributes (receive WR/SGE limits)
 * @context: user context for mmap of the receive queue, may be kernel
 * @udata:   user data for queue mmap info, NULL for kernel QPs
 *
 * Allocates the receive queue (unless the QP uses an SRQ), publishes its
 * mmap info to userspace, and initializes responder locks, packet queue
 * and the responder tasklet state.
 *
 * Returns 0 on success or a negative errno.
 */
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context,
			    struct ib_udata *udata)
{
	int err;
	int wqe_size;

	/* A QP attached to an SRQ has no receive queue of its own. */
	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		/* rxe_queue_init may round max_wr up to the queue depth. */
		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, udata, false, context,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			/* NOTE(review): qp->rq.queue is left pointing at the
			 * freed memory here — confirm callers never touch it
			 * after this error path. */
			kvfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	/* Responder work runs in its own task. */
	rxe_init_task(rxe, &qp->resp.task, qp, rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) { struct mlx5e_channel *c = cq->channel; struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_cq *mcq = &cq->mcq; void *in; void *cqc; int inlen; int irqn_not_used; int eqn; int err; inlen = MLX5_ST_SZ_BYTES(create_cq_in) + sizeof(u64) * cq->wq_ctrl.buf.npages; in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); memcpy(cqc, param->cqc, sizeof(param->cqc)); mlx5_fill_page_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); err = mlx5_core_create_cq(mdev, mcq, in, inlen); kvfree(in); if (err) return err; mlx5e_cq_arm(cq); return 0; }
static void null_free_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req) { if (!req->rq_pool) { LASSERTF(req->rq_reqmsg == req->rq_reqbuf, "req %p: reqmsg %p is not reqbuf %p in null sec\n", req, req->rq_reqmsg, req->rq_reqbuf); LASSERTF(req->rq_reqbuf_len >= req->rq_reqlen, "req %p: reqlen %d should smaller than buflen %d\n", req, req->rq_reqlen, req->rq_reqbuf_len); kvfree(req->rq_reqbuf); req->rq_reqbuf = NULL; req->rq_reqbuf_len = 0; } }
/*
 * unpack_table - unpack one serialized dfa table from a binary blob
 * @blob:  raw table bytes (big-endian header fields followed by data)
 * @bsize: number of bytes available at @blob
 *
 * Returns a newly allocated table with byte-swapped entries, or NULL on
 * malformed input or allocation failure.
 */
static struct table_header *unpack_table(char *blob, size_t bsize)
{
	struct table_header *table = NULL;
	struct table_header th;
	size_t tsize;

	if (bsize < sizeof(struct table_header))
		goto out;

	/* Header fields sit at fixed big-endian offsets; the on-disk id is
	 * stored one greater than the in-memory value, hence the -1.
	 * NOTE(review): offset 4 is skipped — presumably a field this code
	 * does not need; confirm against the on-disk format definition. */
	th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1;
	th.td_flags = be16_to_cpu(*(u16 *) (blob + 2));
	th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8));
	blob += sizeof(struct table_header);

	/* Only the three known cell widths are accepted. */
	if (!(th.td_flags == YYTD_DATA16 || th.td_flags == YYTD_DATA32 ||
	      th.td_flags == YYTD_DATA8))
		goto out;

	tsize = table_size(th.td_lolen, th.td_flags);
	if (bsize < tsize)
		goto out;

	/* NOTE(review): single-argument kvmalloc — this is a local helper
	 * of this subsystem, not the kernel-wide kvmalloc(size, gfp). */
	table = kvmalloc(tsize);
	if (table) {
		*table = th;
		/* Copy entries, converting from big-endian per cell width. */
		if (th.td_flags == YYTD_DATA8)
			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
				     u8, byte_to_byte);
		else if (th.td_flags == YYTD_DATA16)
			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
				     u16, be16_to_cpu);
		else if (th.td_flags == YYTD_DATA32)
			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
				     u32, be32_to_cpu);
		else
			goto fail;
	}

out:
	/* Flush lazy vmap aliases when the table lives in vmalloc space. */
	if (is_vmalloc_addr(table))
		vm_unmap_aliases();
	return table;

fail:
	kvfree(table);
	return NULL;
}
/*
 * nvdimm_drvdata_release - kref release hook for per-dimm driver data.
 * Frees all DPA resources under the bus lock, then the label data and
 * the drvdata itself, and drops the device reference taken at init.
 */
void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *r, *next;

	dev_dbg(dev, "%s\n", __func__);

	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, r, next)
		nvdimm_free_dpa(ndd, r);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}
/*
 * mlx5_set_nic_vport_vlan_list - program the allowed-VLAN list of a vport
 * @dev:       mlx5 core device
 * @vport:     vport number; non-zero selects another vport via other_vport
 * @vlan_list: array of VLAN ids to allow
 * @list_len:  number of entries in @vlan_list
 *
 * Returns 0 on success, -ENOSPC if the list exceeds the device limit, or
 * another negative errno.
 */
int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
				 u16 *vlan_list, int list_len)
{
	void *in, *ctx;
	int i, err;
	/* Mailbox grows by one vlan_layout per list entry. */
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		    MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
	int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);

	if (list_len > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       list_len, max_list_size);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);

	/* The allowed-list entries reuse the current_uc_mac_address slots;
	 * each slot is reinterpreted as a vlan_layout here. */
	for (i = 0; i < list_len; i++) {
		u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
					     current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
	}

	err = mlx5_modify_nic_vport_context(dev, in, inlen);

	kvfree(in);
	return err;
}
/*
 * mlx5e_enable_sq - create the hardware send queue backing @sq
 * @sq:    driver SQ state; wq_ctrl buffer must already be allocated
 * @param: creation parameters (sqc template)
 *
 * Builds the CREATE_SQ input mailbox (SQ context plus cyclic WQ layout
 * and buffer page list) and issues the firmware command, recording the
 * assigned SQ number in sq->sqn.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	/* Mailbox holds the SQ context plus one PA per buffer page. */
	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	/* Bind the SQ to its traffic class: per-TC TIS and CQ. */
	MLX5_SET(sqc, sqc, user_index, sq->tc);
	MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
	MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz,
		 sq->wq_ctrl.buf.page_shift - PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}
/*
 * create_xrq_cmd - issue CREATE_XRQ to firmware for an XRQ-backed SRQ
 * @dev: mlx5 core device
 * @srq: core SRQ object; srqn and uid are filled in on success
 * @in:  SRQ attributes, including the PAS (physical address) list
 *
 * Returns 0 on success or a negative errno.
 */
static int create_xrq_cmd(struct mlx5_core_dev *dev,
			  struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
	void *create_in;
	void *xrqc;
	void *wq;
	int pas_size;
	int inlen;
	int err;

	/* Mailbox holds the XRQ context plus the caller's PAS list. */
	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
	wq = MLX5_ADDR_OF(xrqc, xrqc, wq);

	set_wq(wq, in);
	memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size);

	/* Tag-matching SRQs get topology/offload/list-size configuration. */
	if (in->type == IB_SRQT_TM) {
		MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
		if (in->flags & MLX5_SRQ_FLAG_RNDV)
			MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
		MLX5_SET(xrqc, xrqc,
			 tag_matching_topology_context.log_matching_list_sz,
			 in->tm_log_list_size);
	}
	MLX5_SET(xrqc, xrqc, user_index, in->user_index);
	MLX5_SET(xrqc, xrqc, cqn, in->cqn);
	MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
	MLX5_SET(create_xrq_in, create_in, uid, in->uid);

	err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
		srq->uid = in->uid;
	}

	return err;
}
/*
 * mlx5_query_nic_vport_node_guid - read the node GUID of vport 0
 * @mdev:      mlx5 core device
 * @node_guid: out parameter for the GUID
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix: the return value of mlx5_query_nic_vport_context() was ignored,
 * so a failed query silently reported an all-zero GUID.  Propagate the
 * error and only write *node_guid on success.
 */
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.node_guid);

	kvfree(out);
	return err;
}
/*
 * mlx5_modify_nic_vport_mtu - write the NIC vport MTU via firmware.
 * Returns 0 on success or a negative errno.
 */
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (in == NULL)
		return -ENOMEM;

	/* Select only the MTU field and set the new value. */
	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
/* ll_free_user_pages - tear down page struct array
 * @pages:    array of page struct pointers underlying target buffer
 * @npages:   number of entries in @pages (a NULL entry ends the list early)
 * @do_dirty: mark each page dirty before dropping its reference
 */
static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	int i;

	/* Stop at the first NULL slot: the array may be only partly filled. */
	for (i = 0; i < npages && pages[i]; i++) {
		if (do_dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}

#if defined(HAVE_DIRECTIO_ITER) || defined(HAVE_IOV_ITER_RW)
	kvfree(pages);
#else
	OBD_FREE_LARGE(pages, npages * sizeof(*pages));
#endif
}
int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu) { u32 *in; u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); int err; in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu); err = mlx5_modify_nic_vport_context(mdev, in, inlen); kvfree(in); return err; }
/*
 * mlx5_query_nic_vport_mtu - read the NIC vport MTU from firmware.
 * *mtu is written only when the query succeeds.
 * Returns 0 on success or a negative errno.
 */
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err == 0)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
/*
 * mlx5_query_nic_vport_qkey_viol_cntr - read the Q_Key violation counter
 * @mdev:           mlx5 core device
 * @qkey_viol_cntr: out parameter for the counter value
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix: the return value of mlx5_query_nic_vport_context() was ignored,
 * so a failed query silently reported a zero counter.  Propagate the
 * error and only write the counter on success.
 */
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
					   nic_vport_context.qkey_violation_counter);

	kvfree(out);
	return err;
}
/*
 * query_xrc_srq_cmd - query an XRC SRQ from firmware
 * @dev: mlx5 core device
 * @srq: SRQ to query (srq->srqn selects it)
 * @out: caller-supplied result mailbox
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): @out is never populated from xrcsrq_out in this version —
 * the query result is fetched and then discarded.  Also, the
 * "if (err) goto out" jump is redundant since control falls through to
 * the same label.  Confirm against the intended behavior upstream.
 */
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
			     struct mlx5_core_srq *srq,
			     struct mlx5_query_srq_mbox_out *out)
{
	u32 *xrcsrq_out;
	int err;

	xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
	if (!xrcsrq_out)
		return -ENOMEM;

	err = mlx5_core_query_xsrq(dev, srq->srqn, xrcsrq_out);
	if (err)
		goto out;

out:
	kvfree(xrcsrq_out);
	return err;
}
/*
 * mlx5_set_nic_vport_current_mac - program the current unicast MAC of a vport
 * @mdev:        mlx5 core device
 * @vport:       target vport number
 * @other_vport: true to address a vport other than the caller's own
 * @addr:        MAC address to install (6 bytes)
 *
 * Builds a modify-nic-vport-context mailbox whose allowed-UC list holds
 * exactly one entry: the new MAC.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
				   bool other_vport, u8 *addr)
{
	void *in;
	/* Mailbox grows by one mac_address_layout for the single entry. */
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		    MLX5_ST_SZ_BYTES(mac_address_layout);
	u8 *mac_layout;
	u8 *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	/* NOTE(review): the opcode is set here explicitly; other callers in
	 * this file rely on mlx5_modify_nic_vport_context() for it —
	 * presumably redundant but harmless; confirm against the helper. */
	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, other_vport);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_UC);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_size, 1);

	mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
					nic_vport_context.current_uc_mac_address);
	/* The MAC occupies the high 48 bits of the layout entry. */
	mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
				     mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
/**
 * Replace the cached Riak vector clock on @kvstore's server handle.
 *
 * @param kvstore The key-value store whose riak server holds the vclock.
 * @param vclock  New vclock string to copy, or NULL to clear it.
 *
 * On allocation failure the stored vclock becomes NULL.
 *
 * Fix: the old value was freed before the new one was copied, so calling
 * this with the currently stored pointer (riak->vclock itself) read freed
 * memory.  Copy first, then free.
 */
void ib_kvstore_riak_set_vclock(ib_kvstore_t *kvstore, char *vclock)
{
    ib_kvstore_riak_server_t *riak;
    char *copy = NULL;

    riak = (ib_kvstore_riak_server_t *)kvstore->server;

    /* Duplicate the new value before releasing the old one so the call is
     * safe even when vclock aliases riak->vclock. */
    if (vclock) {
        size_t len = strlen(vclock) + 1;
        copy = kvmalloc(kvstore, len);
        if (copy) {
            memcpy(copy, vclock, len);
        }
    }

    if (riak->vclock) {
        kvfree(kvstore, riak->vclock);
    }
    riak->vclock = copy;
}
/**
 * Replace the cached Riak ETag on @kvstore's server handle.
 *
 * @param kvstore The key-value store whose riak server holds the etag.
 * @param etag    New etag string to copy, or NULL to clear it.
 *
 * On allocation failure the stored etag becomes NULL.
 *
 * Fix: the old value was freed before the new one was copied, so calling
 * this with the currently stored pointer (riak->etag itself) read freed
 * memory.  Copy first, then free.
 */
void ib_kvstore_riak_set_etag(ib_kvstore_t *kvstore, char *etag)
{
    ib_kvstore_riak_server_t *riak;
    char *copy = NULL;

    riak = (ib_kvstore_riak_server_t *)kvstore->server;

    /* Duplicate the new value before releasing the old one so the call is
     * safe even when etag aliases riak->etag. */
    if (etag) {
        size_t len = strlen(etag) + 1;
        copy = kvmalloc(kvstore, len);
        if (copy) {
            memcpy(copy, etag, len);
        }
    }

    if (riak->etag) {
        kvfree(kvstore, riak->etag);
    }
    riak->etag = copy;
}
/*
 * mlx4_en_destroy_tx_ring - tear down a TX ring and release its resources
 * @priv:  per-netdev private state
 * @pring: pointer to the ring pointer; cleared to NULL on return
 *
 * Releases, in order: the BlueFlame register (if allocated), the QP, the
 * reserved QP number range, the HW queue resources, and finally the
 * bounce buffer, tx_info array and the ring structure itself.
 */
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring = *pring;

	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	if (ring->bf_alloced)
		mlx4_bf_free(mdev->dev, &ring->bf);
	/* Remove the QP from tracking before freeing it. */
	mlx4_qp_remove(mdev->dev, &ring->sp_qp);
	mlx4_qp_free(mdev->dev, &ring->sp_qp);
	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
	mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	/* tx_info may be large, hence kvfree. */
	kvfree(ring->tx_info);
	ring->tx_info = NULL;
	kfree(ring);
	*pring = NULL;
}
/*
 * mlx5_query_vport_counter - query firmware traffic counters for a vport
 * @dev:       mlx5 core device
 * @port_num:  physical port to query (only set on dual-port devices)
 * @vport_num: vport to query; non-zero requires group-manager capability
 * @out:       caller-supplied output mailbox
 * @out_size:  size of @out in bytes
 *
 * Returns 0 on success, -EPERM if another vport is requested without the
 * group-manager capability, or another negative errno.
 *
 * Fix: removed the dead "if (err) goto ex;" that immediately preceded the
 * ex: label — control fell through to the same place either way.
 */
int mlx5_query_vport_counter(struct mlx5_core_dev *dev, u8 port_num,
			     u16 vport_num, void *out, int out_size)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	in = mlx5_vzalloc(in_sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (vport_num) {
		/* Only the group manager may read another vport's counters. */
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number,
				 vport_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
	if (err)
		goto ex;

	/* Translate any command status embedded in the output mailbox. */
	err = mlx5_cmd_status_to_err_v2(out);
ex:
	kvfree(in);
	return err;
}
/**
 * Remove @key from the Riak store by issuing an HTTP DELETE.
 *
 * @param kvstore The key-value store.
 * @param key     Key to delete.
 * @param cbdata  Callback data (unused by this implementation).
 *
 * @returns IB_OK on success, IB_EALLOC if the key URL could not be built,
 *          IB_EOTHER on any CURL failure.
 *
 * Fix: build_key_url()'s result was passed to CURL without a NULL check;
 * an allocation failure would have handed CURL a NULL URL.
 */
static ib_status_t kvremove(
    ib_kvstore_t *kvstore,
    const ib_kvstore_key_t *key,
    ib_kvstore_cbdata_t *cbdata)
{
    assert(kvstore);
    assert(key);

    char *url;
    ib_status_t rc;
    CURLcode curl_rc;
    ib_kvstore_riak_server_t *riak;

    rc = IB_OK;
    riak = (ib_kvstore_riak_server_t *)kvstore->server;

    /* build_key_url allocates; fail cleanly on OOM. */
    url = build_key_url(kvstore, riak, key);
    if (url == NULL) {
        return IB_EALLOC;
    }

    curl_rc = curl_easy_setopt(riak->curl, CURLOPT_URL, url);
    if (curl_rc) {
        rc = IB_EOTHER;
        goto exit;
    }

    curl_rc = curl_easy_setopt(riak->curl, CURLOPT_CUSTOMREQUEST, "DELETE");
    if (curl_rc) {
        rc = IB_EOTHER;
        goto exit;
    }

    curl_rc = curl_easy_perform(riak->curl);
    if (curl_rc) {
        rc = IB_EOTHER;
        goto exit;
    }

exit:
    /* Reset the shared handle so later requests start clean. */
    curl_easy_reset(riak->curl);
    kvfree(kvstore, url);
    return rc;
}
/*
 * mlx5_ib_set_cc_params - write one congestion-control parameter
 * @dev:      mlx5_ib device
 * @port_num: zero-based port index (converted to 1-based below)
 * @offset:   parameter offset, mapped to a congestion node type
 * @var:      new parameter value
 *
 * Builds a MODIFY_CONG_PARAMS mailbox with only the field selected by
 * @offset set, and issues it on the native port's mdev.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
				 int offset, u32 var)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_cong_params_in);
	void *in;
	void *field;
	enum mlx5_ib_cong_node_type node;
	struct mlx5_core_dev *mdev;
	u32 attr_mask = 0;
	int err;

	/* Takes a 1-based port number */
	mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
	if (!mdev)
		return -ENODEV;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto alloc_err;
	}

	MLX5_SET(modify_cong_params_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_CONG_PARAMS);

	/* The offset determines which congestion protocol node to modify. */
	node = mlx5_ib_param_to_node(offset);
	MLX5_SET(modify_cong_params_in, in, cong_protocol, node);

	field = MLX5_ADDR_OF(modify_cong_params_in, in, congestion_parameters);
	/* Writes the value and records which field was touched in attr_mask. */
	mlx5_ib_set_cc_param_mask_val(field, offset, var, &attr_mask);

	field = MLX5_ADDR_OF(modify_cong_params_in, in, field_select);
	MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
		 attr_mask);

	err = mlx5_cmd_modify_cong_params(mdev, in, inlen);
	kvfree(in);
alloc_err:
	/* Balance the reference taken by mlx5_ib_get_native_port_mdev. */
	mlx5_ib_put_native_port_mdev(dev, port_num + 1);
	return err;
}
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *in) { u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; void *create_in; void *xrc_srqc; void *pas; int pas_size; int inlen; int err; pas_size = get_pas_size(in); inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size; create_in = kvzalloc(inlen, GFP_KERNEL); if (!create_in) return -ENOMEM; MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid); xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srq_context_entry); pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas); set_srqc(xrc_srqc, in); MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index); memcpy(pas, in->pas, pas_size); MLX5_SET(create_xrc_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ); memset(create_out, 0, sizeof(create_out)); err = mlx5_cmd_exec(dev, create_in, inlen, create_out, sizeof(create_out)); if (err) goto out; srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn); srq->uid = in->uid; out: kvfree(create_in); return err; }
/*
 * mlx5_query_nic_vport_node_guid - read the node GUID of vport 0.
 * *node_guid is written only when the query succeeds.
 * Returns 0 on success or a negative errno.
 */
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err == 0)
		*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.node_guid);

	kvfree(out);
	return err;
}
/*
 * mlx5_core_query_ib_ppcnt - read the IB port-counters group via PPCNT.
 * @out must be at least @sz bytes; @sz is used for both mailboxes.
 * Returns 0 on success or a negative errno.
 */
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz)
{
	u32 *in;
	int err;

	in = mlx5_vzalloc(sz);
	if (in == NULL)
		return -ENOMEM;

	MLX5_SET(ppcnt_reg, in, local_port, port_num);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);

	err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	kvfree(in);
	return err;
}
/*
 * mlx5_query_hca_vport_state - read the HCA vport state (port 1, vport 0).
 * *vport_state is written only when the query succeeds.
 * Returns 0 on success or a negative errno.
 */
int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
{
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
	if (err == 0)
		*vport_state = MLX5_GET(query_hca_vport_context_out, out,
					hca_vport_context.vport_state);

	kvfree(out);
	return err;
}
/*
 * jffs2_put_super - release all in-memory filesystem state at unmount
 * @sb: superblock being torn down
 *
 * Flushes the write buffer under alloc_sem, then frees summary state,
 * inode caches, raw node refs, the eraseblock array, flash-layer state,
 * the inocache hash and xattr state, and finally syncs the MTD device.
 */
static void jffs2_put_super (struct super_block *sb)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	jffs2_dbg(2, "%s()\n", __func__);

	/* Pad and flush any pending write-buffer data before teardown. */
	mutex_lock(&c->alloc_sem);
	jffs2_flush_wbuf_pad(c);
	mutex_unlock(&c->alloc_sem);

	jffs2_sum_exit(c);

	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	/* blocks may be large (one entry per eraseblock), hence kvfree. */
	kvfree(c->blocks);
	jffs2_flash_cleanup(c);
	kfree(c->inocache_list);
	jffs2_clear_xattr_subsystem(c);
	mtd_sync(c->mtd);

	jffs2_dbg(1, "%s(): returning\n", __func__);
}
/* .remove file hook fn to remove loaded policy */
static ssize_t profile_remove(struct file *f, const char __user *buf,
			      size_t size, loff_t *pos)
{
	char *data;
	ssize_t error;

	/*
	 * aa_remove_profile needs a null terminated string so 1 extra
	 * byte is allocated and the copied data is null terminated.
	 */
	data = aa_simple_write_to_buffer(OP_PROF_RM, buf, size + 1, size, pos);
	if (IS_ERR(data))
		return PTR_ERR(data);

	data[size] = 0;
	error = aa_remove_profiles(data, size);
	kvfree(data);
	return error;
}
static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev, enum mlx5_vport_roce_state state) { void *in; int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); int err; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en, state); err = mlx5_modify_nic_vport_context(mdev, in, inlen); kvfree(in); return err; }