/* This saves a lot of CPU/memory cycles on both sides */
static void bbc_free(struct nvshm_handle *handle, struct nvshm_iobuf *iob)
{
	unsigned long f;

	spin_lock_irqsave(&alloc.lock, f);
	alloc.free_count++;
	/* Chain the iobuf at the tail of the pending-free list; links are
	 * stored as BBC-side addresses, hence the NVSHM_A2B() conversion.
	 */
	if (alloc.bbc_pool_head) {
		alloc.bbc_pool_tail->next = NVSHM_A2B(handle, iob);
		alloc.bbc_pool_tail = iob;
	} else {
		alloc.bbc_pool_head = alloc.bbc_pool_tail = iob;
	}
	spin_unlock_irqrestore(&alloc.lock, f);
	/* Hand the whole batch back to the BBC once enough frees accumulate */
	if (alloc.free_count > NVSHM_MAX_FREE_PENDING)
		nvshm_iobuf_bbc_free(handle);
}
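The batching above is what the top comment refers to: freed iobufs pile up on a local list under the spinlock, and only once more than NVSHM_MAX_FREE_PENDING are queued does nvshm_iobuf_bbc_free() push the whole batch back to the BBC in one handoff, instead of signalling the remote side once per buffer. Below is a minimal standalone sketch of the same deferred-free pattern, using a pthread mutex in place of the spinlock; the names node, pending_free, flush_batch, and FREE_BATCH_THRESHOLD are hypothetical and not part of the driver.

/*
 * Illustrative sketch only -- not driver code. Freed nodes are chained
 * on a local list and handed back in one operation per batch.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define FREE_BATCH_THRESHOLD 4	/* hypothetical stand-in for NVSHM_MAX_FREE_PENDING */

struct node {
	int id;
	struct node *next;
};

static struct {
	pthread_mutex_t lock;
	struct node *head, *tail;
	int count;
} pending_free = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Stand-in for the expensive per-batch handoff (signalling the remote side) */
static void flush_batch(void)
{
	struct node *n, *next;

	pthread_mutex_lock(&pending_free.lock);
	n = pending_free.head;
	pending_free.head = pending_free.tail = NULL;
	pending_free.count = 0;
	pthread_mutex_unlock(&pending_free.lock);

	for (; n; n = next) {
		next = n->next;
		printf("returning node %d\n", n->id);
		free(n);
	}
}

/* Queue a node for deferred free; flush only when the batch is large enough */
static void deferred_free(struct node *n)
{
	int count;

	n->next = NULL;
	pthread_mutex_lock(&pending_free.lock);
	if (pending_free.head) {
		pending_free.tail->next = n;
		pending_free.tail = n;
	} else {
		pending_free.head = pending_free.tail = n;
	}
	count = ++pending_free.count;
	pthread_mutex_unlock(&pending_free.lock);

	if (count > FREE_BATCH_THRESHOLD)
		flush_batch();
}

int main(void)
{
	for (int i = 0; i < 10; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		deferred_free(n);
	}
	flush_batch();	/* final drain, like nvshm_iobuf_bbc_free() at queue end */
	return 0;
}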
/*
 * Called from IPC workqueue
 */
void nvshm_process_queue(struct nvshm_handle *handle)
{
	unsigned long f;
	struct nvshm_iobuf *iob;
	struct nvshm_if_operations *ops;
	int chan;

	spin_lock_irqsave(&handle->lock, f);
	iob = nvshm_queue_get(handle);
	while (iob) {
		pr_debug("%s %p/%d/%d/%d->%p\n", __func__,
			 iob, iob->chan, iob->length, iob->ref, iob->next);
		tegra_bb_clear_ipc(handle->tegra_bb);
		chan = iob->chan;
		if (iob->pool_id < NVSHM_AP_POOL_ID) {
			ops = handle->chan[chan].ops;
			if (ops) {
				/* Drop the queue lock across the callback:
				 * the handler may itself queue or free iobufs.
				 */
				spin_unlock_irqrestore(&handle->lock, f);
				ops->rx_event(&handle->chan[chan], iob);
				spin_lock_irqsave(&handle->lock, f);
			} else {
				/* No handler registered for this channel */
				nvshm_iobuf_free_cluster(iob);
			}
		} else {
			/* Freed iobufs can form a tree: process the
			 * attached iobufs but do not touch iob itself,
			 * as it will be freed by the next queue_get.
			 */
			if (iob->next)
				nvshm_iobuf_process_freed(
					NVSHM_B2A(handle, iob->next));
		}
		iob = nvshm_queue_get(handle);
	}
	spin_unlock_irqrestore(&handle->lock, f);
	/* Finalize BBC free */
	nvshm_iobuf_bbc_free(handle);
}
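Note the lock juggling around rx_event(): handle->lock protects the shared queue, never the per-channel handler, so it is released before calling into the handler and re-acquired afterwards. That lets the callback allocate, free, or requeue iobufs without deadlocking on the same lock. Below is a condensed standalone sketch of that drop-lock-call-relock shape, with a pthread mutex standing in for the spinlock; the names item, enqueue, dequeue, and drain_queue are hypothetical and not part of the driver.

/*
 * Illustrative sketch only -- not driver code. The queue lock is held
 * only while manipulating the list, never across the callback, so the
 * callback may re-enter queue operations safely.
 */
#include <pthread.h>
#include <stdio.h>

struct item {
	struct item *next;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *queue_head;

static void enqueue(struct item *it)
{
	pthread_mutex_lock(&queue_lock);
	it->next = queue_head;
	queue_head = it;
	pthread_mutex_unlock(&queue_lock);
}

/* Pop one item; must be called with queue_lock held */
static struct item *dequeue(void)
{
	struct item *it = queue_head;

	if (it)
		queue_head = it->next;
	return it;
}

static void drain_queue(void (*handler)(struct item *))
{
	struct item *it;

	pthread_mutex_lock(&queue_lock);
	while ((it = dequeue()) != NULL) {
		/* Release before the callback: it may take queue_lock itself */
		pthread_mutex_unlock(&queue_lock);
		handler(it);
		pthread_mutex_lock(&queue_lock);
	}
	pthread_mutex_unlock(&queue_lock);
}

static void print_item(struct item *it)
{
	printf("handled %p\n", (void *)it);
}

int main(void)
{
	struct item items[3];

	for (int i = 0; i < 3; i++)
		enqueue(&items[i]);
	drain_queue(print_item);
	return 0;
}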