/*
 * Dump every field of an iobuf descriptor to the kernel log at error level.
 *
 * @iob: AP-side (virtual) pointer to the descriptor.  The header line
 * prints its BBC-side address (NVSHM_A2B) so the dump can be matched
 * against modem-side traces.  give_pointer_location() labels the memory
 * region each embedded pointer falls in.
 */
void nvshm_iobuf_dump(struct nvshm_iobuf *iob)
{
	struct nvshm_handle *priv = nvshm_get_handle();

	pr_err("iobuf (0x%p) dump:\n", NVSHM_A2B(priv, iob));
	/* Payload pointer and geometry */
	pr_err("\t data = 0x%p (%s)\n", iob->npduData,
	       give_pointer_location(priv, iob->npduData));
	pr_err("\t length = %d\n", iob->length);
	pr_err("\t offset = %d\n", iob->dataOffset);
	pr_err("\t total_len = %d\n", iob->totalLength);
	pr_err("\t ref = %d\n", iob->ref);
	/* Pool ids below NVSHM_AP_POOL_ID belong to the modem (BBC) side */
	pr_err("\t pool_id = %d (%s)\n", iob->pool_id,
	       (iob->pool_id < NVSHM_AP_POOL_ID) ? "BBC" : "AP");
	/* Link fields (BBC-side addresses) */
	pr_err("\t next = 0x%p (%s)\n", iob->next,
	       give_pointer_location(priv, iob->next));
	pr_err("\t sg_next = 0x%p (%s)\n", iob->sg_next,
	       give_pointer_location(priv, iob->sg_next));
	pr_err("\t flags = 0x%x\n", iob->flags);
	pr_err("\t _size = %d\n", iob->_size);
	pr_err("\t _handle = 0x%p\n", iob->_handle);
	pr_err("\t _reserved = 0x%x\n", iob->_reserved);
	/* Queueing state */
	pr_err("\t qnext = 0x%p (%s)\n", iob->qnext,
	       give_pointer_location(priv, iob->qnext));
	pr_err("\t chan = 0x%x\n", iob->chan);
	pr_err("\t qflags = 0x%x\n", iob->qflags);
}
/*
 * Drop one reference on every iobuf of a cluster.
 *
 * Walks the whole cluster rooted at @iob: the ->next chain and, for
 * each element on it, its ->sg_next scatter chain.  Both link fields
 * hold BBC-side addresses and are converted with NVSHM_B2A before
 * being followed.
 *
 * Returns the highest reference count reported by nvshm_iobuf_unref()
 * across the cluster (0 means every element was actually released).
 */
int nvshm_iobuf_unref_cluster(struct nvshm_iobuf *iob)
{
	struct nvshm_handle *handle = nvshm_get_handle();
	struct nvshm_iobuf *frag, *leaf;
	int max_ref = 0;

	for (frag = iob; frag != NULL;
	     frag = frag->next ? NVSHM_B2A(handle, frag->next) : NULL) {
		for (leaf = frag; leaf != NULL;
		     leaf = leaf->sg_next ?
			     NVSHM_B2A(handle, leaf->sg_next) : NULL) {
			int ref = nvshm_iobuf_unref(leaf);

			if (ref > max_ref)
				max_ref = ref;
		}
	}
	return max_ref;
}
/** Single iobuf free - do not follow iobuf links */ void nvshm_iobuf_free(struct nvshm_iobuf *desc) { struct nvshm_handle *priv = nvshm_get_handle(); int callback = 0, chan; unsigned long f; if (desc->ref == 0) { pr_err("%s: freeing an already freed iobuf (0x%x)\n", __func__, (unsigned int)desc); return; } spin_lock_irqsave(&alloc.lock, f); pr_debug("%s: free 0x%p ref %d pool %x\n", __func__, desc, desc->ref, desc->pool_id); desc->ref--; chan = desc->chan; if (desc->ref == 0) { if (desc->pool_id >= NVSHM_AP_POOL_ID) { /* update rate counter */ if ((chan >= 0) && (chan < priv->chan_count)) { if ((priv->chan[chan].rate_counter++ == NVSHM_RATE_LIMIT_TRESHOLD) && (priv->chan[chan].xoff)) { priv->chan[chan].xoff = 0; callback = 1; } } desc->sg_next = NULL; desc->next = NULL; desc->length = 0; desc->flags = 0; desc->dataOffset = 0; desc->chan = 0; if (alloc.free_pool_tail) { alloc.free_pool_tail->next = NVSHM_A2B(priv, desc); alloc.free_pool_tail = desc; } else { alloc.free_pool_head = desc; alloc.free_pool_tail = desc; } } else { /* iobuf belongs to other side */ pr_debug("%s: re-queue freed buffer\n", __func__); desc->sg_next = NULL; desc->next = NULL; desc->length = 0; desc->dataOffset = 0; spin_unlock_irqrestore(&alloc.lock, f); bbc_free(priv, desc); return; } } spin_unlock_irqrestore(&alloc.lock, f); if (callback) nvshm_start_tx(&priv->chan[chan]); }
struct nvshm_iobuf *nvshm_iobuf_alloc(struct nvshm_channel *chan, int size) { struct nvshm_handle *handle = nvshm_get_handle(); struct nvshm_iobuf *desc = NULL; unsigned long f; spin_lock_irqsave(&alloc.lock, f); if (alloc.free_pool_head) { int check = nvshm_iobuf_check(alloc.free_pool_head); if (check) { spin_unlock_irqrestore(&alloc.lock, f); pr_err("%s: iobuf check ret %d\n", __func__, check); return NULL; } if (size > (alloc.free_pool_head->total_length - NVSHM_DEFAULT_OFFSET)) { spin_unlock_irqrestore(&alloc.lock, f); pr_err("%s: requested size (%d > %d) too big\n", __func__, size, alloc.free_pool_head->total_length - NVSHM_DEFAULT_OFFSET); if (chan->ops) { chan->ops->error_event(chan, NVSHM_IOBUF_ERROR); } return desc; } desc = alloc.free_pool_head; alloc.free_pool_head = desc->next; if (alloc.free_pool_head) { alloc.free_pool_head = NVSHM_B2A(handle, alloc.free_pool_head); } else { pr_debug("%s end of alloc queue - clearing tail\n", __func__); alloc.free_pool_tail = NULL; } desc->length = 0; desc->flags = 0; desc->data_offset = NVSHM_DEFAULT_OFFSET; desc->sg_next = NULL; desc->next = NULL; desc->ref = 1; } else { spin_unlock_irqrestore(&alloc.lock, f); pr_err("%s: no more alloc space\n", __func__); /* No error since it's only Xoff situation */ return desc; } spin_unlock_irqrestore(&alloc.lock, f); return desc; }
/** Returned iobuf are already freed - just process them */ void nvshm_iobuf_process_freed(struct nvshm_iobuf *desc) { struct nvshm_handle *priv = nvshm_get_handle(); unsigned long f; while (desc) { int callback = 0, chan; struct nvshm_iobuf *next = desc->next; if (desc->ref != 0) { pr_err("%s: BBC returned an non freed iobuf (0x%x)\n", __func__, (unsigned int)desc); return; } chan = desc->chan; spin_lock_irqsave(&alloc.lock, f); /* update rate counter */ if ((chan >= 0) && (chan < priv->chan_count)) { if ((priv->chan[chan].rate_counter++ == NVSHM_RATE_LIMIT_TRESHOLD) && (priv->chan[chan].xoff)) { priv->chan[chan].xoff = 0; callback = 1; } } desc->sg_next = NULL; desc->next = NULL; desc->length = 0; desc->flags = 0; desc->dataOffset = 0; desc->chan = 0; if (alloc.free_pool_tail) { alloc.free_pool_tail->next = NVSHM_A2B(priv, desc); alloc.free_pool_tail = desc; } else { alloc.free_pool_head = desc; alloc.free_pool_tail = desc; } spin_unlock_irqrestore(&alloc.lock, f); if (callback) nvshm_start_tx(&priv->chan[chan]); if (next) { desc = NVSHM_B2A(priv, next); } else { desc = next; } } }
/*
 * Unregister the interface operations of @handle's channel.
 * The ->data pointer is deliberately left in place because cleanup
 * code may still need it.
 */
void nvshm_close_channel(struct nvshm_channel *handle)
{
	struct nvshm_handle *priv = nvshm_get_handle();
	unsigned long flags;

	/*
	 * Do not flush the work queue here: nvshm_close_channel() is
	 * called from cleanup_interfaces(), which itself runs in the
	 * work queue context.  Flushing is also unnecessary because
	 * the main work queue handler always re-checks the IPC state.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	/* Clear ops but not data as it may be used for cleanup */
	priv->chan[handle->index].ops = NULL;
	spin_unlock_irqrestore(&priv->lock, flags);
}
int nvshm_write(struct nvshm_channel *handle, struct nvshm_iobuf *iob) { unsigned long f; struct nvshm_handle *priv = nvshm_get_handle(); struct nvshm_iobuf *list, *leaf; int count = 0, ret = 0; spin_lock_irqsave(&priv->lock, f); if (!priv->chan[handle->index].ops) { pr_err("%s: channel not mapped\n", __func__); spin_unlock_irqrestore(&priv->lock, f); return -EINVAL; } list = iob; while (list) { count++; list->chan = handle->index; leaf = list->sg_next; while (leaf) { count++; leaf = NVSHM_B2A(priv, leaf); leaf->chan = handle->index; leaf = leaf->sg_next; } list = list->next; if (list) list = NVSHM_B2A(priv, list); } priv->chan[handle->index].rate_counter -= count; if (priv->chan[handle->index].rate_counter < 0) { priv->chan[handle->index].xoff = 1; pr_warn("%s: rate limit hit on chan %d\n", __func__, handle->index); ret = 1; } iob->qnext = NULL; nvshm_queue_put(priv, iob); nvshm_generate_ipc(priv); spin_unlock_irqrestore(&priv->lock, f); return ret; }
struct nvshm_channel *nvshm_open_channel(int chan, struct nvshm_if_operations *ops, void *interface_data) { unsigned long f; struct nvshm_handle *handle = nvshm_get_handle(); pr_debug("%s(%d)\n", __func__, chan); spin_lock_irqsave(&handle->lock, f); if (handle->chan[chan].ops) { spin_unlock_irqrestore(&handle->lock, f); pr_err("%s: already registered on chan %d\n", __func__, chan); return NULL; } handle->chan[chan].ops = ops; handle->chan[chan].data = interface_data; spin_unlock_irqrestore(&handle->lock, f); return &handle->chan[chan]; }
/*
 * Free a whole iobuf cluster one element at a time.
 *
 * Elements are freed in scatter-gather order: when the head of an
 * ->sg_next chain is freed, the cluster's ->next link is spliced onto
 * the following sg element so the remaining fragments stay reachable.
 * Link fields hold BBC-side addresses (converted with NVSHM_B2A).
 *
 * Fixes vs. previous version: removed the unused counter 'n' and the
 * redundant NULL re-check of list->sg_next.
 */
void nvshm_iobuf_free_cluster(struct nvshm_iobuf *list)
{
	struct nvshm_handle *priv = nvshm_get_handle();
	struct nvshm_iobuf *_phy_list, *_to_free, *leaf;

	_phy_list = list;
	while (_phy_list) {
		_to_free = list;
		if (list->sg_next) {
			/* Splice the cluster's ->next onto the sg
			 * follower so the walk still reaches the
			 * remaining fragments. */
			_phy_list = list->sg_next;
			leaf = NVSHM_B2A(priv, _phy_list);
			leaf->next = list->next;
		} else {
			_phy_list = list->next;
		}
		/* When _phy_list is NULL this conversion result is never
		 * dereferenced: the loop condition exits first. */
		list = NVSHM_B2A(priv, _phy_list);
		nvshm_iobuf_free(_to_free);
	}
}
int nvshm_iobuf_check(struct nvshm_iobuf *iob) { struct nvshm_handle *priv = nvshm_get_handle(); struct nvshm_iobuf *bbiob; int ret = 0; /* Check iobuf is in IPC space */ if (ADDR_OUTSIDE(iob, priv->ipc_base_virt, priv->ipc_size)) { pr_err("%s: iob @ check failed 0x%lx\n", __func__, (long)iob); return -1; } bbiob = NVSHM_A2B(priv, iob); if (ADDR_OUTSIDE(iob->npduData, NVSHM_IPC_BB_BASE, priv->ipc_size)) { pr_err("%s 0x%lx: npduData @ check failed 0x%lx\n", __func__, (long)bbiob, (long)iob->npduData); ret = -2; goto dump; } if (ADDR_OUTSIDE(iob->npduData + iob->dataOffset, NVSHM_IPC_BB_BASE, priv->ipc_size)) { pr_err("%s 0x%lx: npduData + offset @ check failed 0x%lx/0x%lx\n", __func__, (long)bbiob, (long)iob->npduData, (long)iob->dataOffset); ret = -3; goto dump; } if (iob->next) { if (ADDR_OUTSIDE(iob->next, NVSHM_IPC_BB_BASE, priv->ipc_size)) { pr_err("%s 0x%lx: next @ check failed 0x%lx\n", __func__, (long)bbiob, (long)iob->next); ret = -4; goto dump; } } if (iob->sg_next) { if (ADDR_OUTSIDE(iob->sg_next, NVSHM_IPC_BB_BASE, priv->ipc_size)) { pr_err("%s 0x%lx:sg_next @ check failed 0x%lx\n", __func__, (long)bbiob, (long)iob->sg_next); ret = -5; goto dump; } } if (iob->qnext) { if (ADDR_OUTSIDE(iob->qnext, NVSHM_IPC_BB_BASE, priv->ipc_size)) { pr_err("%s 0x%lx:qnext @ check failed 0x%lx\n", __func__, (long)bbiob, (long)iob->qnext); ret = -6; goto dump; } } return ret; dump: nvshm_iobuf_dump(iob); return ret; }