static void release_gid_table(struct ib_device *device, u8 port,
			      struct ib_gid_table *table)
{
	bool leak = false;
	int i;

	if (!table)
		return;

	for (i = 0; i < table->sz; i++) {
		if (is_gid_entry_free(table->data_vec[i]))
			continue;
		if (kref_read(&table->data_vec[i]->kref) > 1) {
			dev_err(&device->dev,
				"GID entry ref leak for index %d ref=%d\n", i,
				kref_read(&table->data_vec[i]->kref));
			leak = true;
		}
	}
	if (leak)
		return;

	kfree(table->data_vec);
	kfree(table);
}
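/*
 * Not part of the kernel sources quoted here: a minimal, hypothetical kref
 * lifecycle (struct demo_obj, demo_release and demo_teardown are made-up
 * names) sketching the pattern release_gid_table() above relies on.
 * kref_read() only reports the current count; a value above 1 at teardown
 * time means another path still holds a reference, so freeing immediately
 * would risk a use-after-free.
 */
struct demo_obj {
	struct kref kref;
};

static void demo_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

	kfree(obj);
}

static void demo_teardown(struct demo_obj *obj)
{
	/* Mirror the leak check above: complain instead of freeing while
	 * other references are still outstanding. */
	if (kref_read(&obj->kref) > 1)
		pr_err("demo_obj ref leak, ref=%d\n", kref_read(&obj->kref));
	else
		kref_put(&obj->kref, demo_release);
}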
void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev)
			return;

		for (i = 0; i < ctx->adev->num_rings; i++) {

			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;

			if (kref_read(&ctx->refcount) == 1)
				drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
					&ctx->rings[i].entity);
			else
				DRM_ERROR("ctx %p is still alive\n", ctx);
		}
	}
}
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/* This should return the aborted io_req to the command pool */
	if (orig_io_req)
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	kfree(cb_arg);
}
/**
 * drm_mode_object_get - acquire a mode object reference
 * @obj: DRM mode object
 *
 * This function increments the object's refcount if it is a refcounted modeset
 * object. It is a no-op on any other object. References should be dropped again
 * by calling drm_mode_object_put().
 */
void drm_mode_object_get(struct drm_mode_object *obj)
{
	if (obj->free_cb) {
		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, kref_read(&obj->refcount));
		kref_get(&obj->refcount);
	}
}
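/*
 * Hedged usage sketch, not taken from the DRM sources: the kernel-doc above
 * says references taken with drm_mode_object_get() should be dropped with
 * drm_mode_object_put(), which the kernel provides as the matching helper.
 * demo_use_mode_object() is a hypothetical caller showing only that pairing.
 */
static void demo_use_mode_object(struct drm_mode_object *obj)
{
	drm_mode_object_get(obj);	/* hold the object while we use it */

	/* ... inspect or publish obj here ... */

	drm_mode_object_put(obj);	/* drop the reference once done */
}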
/**
 * pci_destroy_slot - decrement refcount for physical PCI slot
 * @slot: struct pci_slot to decrement
 *
 * struct pci_slot is refcounted, so destroying it is really easy; we just
 * call kobject_put on its kobj and let our release methods do the rest.
 */
void pci_destroy_slot(struct pci_slot *slot)
{
	dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
		slot->number, kref_read(&slot->kobj.kref) - 1);

	mutex_lock(&pci_slot_mutex);
	kobject_put(&slot->kobj);
	mutex_unlock(&pci_slot_mutex);
}
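/*
 * Sketch of the kobject refcount pattern pci_destroy_slot() leans on, with
 * hypothetical names (struct demo_slot, demo_slot_release, demo_slot_ktype):
 * the final kobject_put() drops kobj.kref to zero and the ktype's ->release()
 * callback frees the object, so callers never free it directly.
 */
struct demo_slot {
	struct kobject kobj;
};

static void demo_slot_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct demo_slot, kobj));
}

static struct kobj_type demo_slot_ktype = {
	.release = demo_slot_release,
};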
static ssize_t usnic_ib_show_max_vf(struct device *device,
				    struct device_attribute *attr, char *buf)
{
	struct usnic_ib_dev *us_ibdev;

	us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);

	return scnprintf(buf, PAGE_SIZE, "%u\n", kref_read(&us_ibdev->vf_cnt));
}
struct siw_qp *siw_qp_id2obj(struct siw_dev *sdev, int id)
{
	struct siw_objhdr *obj = siw_get_obj(&sdev->qp_idr, id);

	if (obj) {
		pr_debug(DBG_OBJ "(QP%d): New refcount: %d\n",
			 obj->id, kref_read(&obj->ref));
		return container_of(obj, struct siw_qp, hdr);
	}
	return NULL;
}
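/*
 * Hedged usage sketch, not from the siw sources: the "New refcount" debug
 * print above suggests siw_get_obj() takes a reference during the IDR lookup,
 * so a caller like the hypothetical demo_touch_qp() below would be expected
 * to drop it again with siw_qp_put() (shown later in this collection).
 */
static void demo_touch_qp(struct siw_dev *sdev, int id)
{
	struct siw_qp *qp = siw_qp_id2obj(sdev, id);

	if (!qp)
		return;

	/* ... operate on qp ... */

	siw_qp_put(qp);
}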
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgput_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {

		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < num_entities; i++)
			drm_sched_entity_fini(&ctx->entities[0][i].entity);
	}
}
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, "
		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange. We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
/*
 * Report the configuration for this PF
 */
static ssize_t usnic_ib_show_config(struct device *device,
				    struct device_attribute *attr, char *buf)
{
	struct usnic_ib_dev *us_ibdev;
	char *ptr;
	unsigned left;
	unsigned n;
	enum usnic_vnic_res_type res_type;

	us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);

	/* Buffer space limit is 1 page */
	ptr = buf;
	left = PAGE_SIZE;

	mutex_lock(&us_ibdev->usdev_lock);
	if (kref_read(&us_ibdev->vf_cnt) > 0) {
		char *busname;

		/*
		 * bus name seems to come with annoying prefix.
		 * Remove it if it is predictable
		 */
		busname = us_ibdev->pdev->bus->name;
		if (strncmp(busname, "PCI Bus ", 8) == 0)
			busname += 8;

		n = scnprintf(ptr, left,
				"%s: %s:%d.%d, %s, %pM, %u VFs\n Per VF:",
				us_ibdev->ib_dev.name,
				busname,
				PCI_SLOT(us_ibdev->pdev->devfn),
				PCI_FUNC(us_ibdev->pdev->devfn),
				netdev_name(us_ibdev->netdev),
				us_ibdev->ufdev->mac,
				kref_read(&us_ibdev->vf_cnt));
		UPDATE_PTR_LEFT(n, ptr, left);

		for (res_type = USNIC_VNIC_RES_TYPE_EOL;
				res_type < USNIC_VNIC_RES_TYPE_MAX;
				res_type++) {
			if (us_ibdev->vf_res_cnt[res_type] == 0)
				continue;
			n = scnprintf(ptr, left, " %d %s%s",
				us_ibdev->vf_res_cnt[res_type],
				usnic_vnic_res_type_to_str(res_type),
				(res_type < (USNIC_VNIC_RES_TYPE_MAX - 1)) ?
				 "," : "");
			UPDATE_PTR_LEFT(n, ptr, left);
		}
		n = scnprintf(ptr, left, "\n");
		UPDATE_PTR_LEFT(n, ptr, left);
	} else {
		n = scnprintf(ptr, left, "%s: no VFs\n",
				us_ibdev->ib_dev.name);
		UPDATE_PTR_LEFT(n, ptr, left);
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return ptr - buf;
}
void siw_pd_put(struct siw_pd *pd)
{
	pr_debug(DBG_OBJ "(PD%d): Old refcount: %d\n",
		 OBJ_ID(pd), kref_read(&pd->hdr.ref));
	kref_put(&pd->hdr.ref, siw_free_pd);
}
void siw_qp_put(struct siw_qp *qp)
{
	pr_debug(DBG_OBJ "(QP%d): Old refcount: %d\n",
		 QP_ID(qp), kref_read(&qp->hdr.ref));
	kref_put(&qp->hdr.ref, siw_free_qp);
}
void siw_cq_put(struct siw_cq *cq)
{
	pr_debug(DBG_OBJ "(CQ%d): Old refcount: %d\n",
		 OBJ_ID(cq), kref_read(&cq->hdr.ref));
	kref_put(&cq->hdr.ref, siw_free_cq);
}