/*
 * Shutdown a FlexRM mailbox channel.
 *
 * Quiesces the hardware ring (disable + flush), aborts every in-flight
 * request back to the mailbox client with -EIO, then releases the IRQ
 * and frees both descriptor rings.
 *
 * NOTE(review): assumes no new requests are being submitted concurrently
 * on this channel — presumably guaranteed by the mailbox framework once
 * shutdown is invoked; confirm against the framework contract.
 */
static void flexrm_shutdown(struct mbox_chan *chan)
{
	u32 reqid;
	unsigned int timeout;
	struct brcm_message *msg;
	struct flexrm_ring *ring = chan->con_priv;

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Flush ring with timeout of 1s */
	timeout = 1000;
	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
		       ring->regs + RING_CONTROL);
	do {
		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		    FLUSH_DONE_MASK)
			break;
		mdelay(1);
	} while (timeout--);
	/*
	 * NOTE(review): a flush timeout is silently ignored here — the
	 * teardown proceeds regardless. Consider logging if this can
	 * happen in practice.
	 */

	/* Abort all in-flight requests */
	for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
		msg = ring->requests[reqid];
		if (!msg)
			continue;

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;
		ida_simple_remove(&ring->requests_ida, reqid);

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client with error status */
		msg->error = -EIO;
		mbox_chan_received_data(chan, msg);
	}

	/* Release IRQ */
	if (ring->irq_requested) {
		free_irq(ring->irq, ring);
		ring->irq_requested = false;
	}

	/* Free-up completion descriptor ring */
	if (ring->cmpl_base) {
		dma_pool_free(ring->mbox->cmpl_pool, ring->cmpl_base,
			      ring->cmpl_dma_base);
		ring->cmpl_base = NULL;
	}

	/* Free-up BD descriptor ring */
	if (ring->bd_base) {
		dma_pool_free(ring->mbox->bd_pool, ring->bd_base,
			      ring->bd_dma_base);
		ring->bd_base = NULL;
	}
}
/*
 * Enqueue a new request message on a FlexRM ring.
 *
 * Allocates a request id from the ring's bitmap, DMA-maps the message,
 * verifies there is enough BD descriptor space between the SW write
 * offset and the HW read offset, then writes the descriptors and
 * advances the SW write offset.
 *
 * Returns 0 on success, -EIO on a malformed message, -ENOSPC when no
 * request id or descriptor space is available, or the error from DMA
 * mapping / descriptor writing. The error is also mirrored into
 * msg->error.
 *
 * NOTE(review): @batch_msg is accepted but never used in this body —
 * presumably reserved for SPU batch handling; confirm against callers.
 */
static int flexrm_new_request(struct flexrm_ring *ring,
			      struct brcm_message *batch_msg,
			      struct brcm_message *msg)
{
	void *next;
	unsigned long flags;
	u32 val, count, nhcnt;
	u32 read_offset, write_offset;
	bool exit_cleanup = false;
	int ret = 0, reqid;

	/* Do sanity check on message */
	if (!flexrm_sanity_check(msg))
		return -EIO;
	msg->error = 0;

	/* If no requests possible then save data pointer and goto done. */
	spin_lock_irqsave(&ring->lock, flags);
	reqid = bitmap_find_free_region(ring->requests_bmap,
					RING_MAX_REQ_COUNT, 0);
	spin_unlock_irqrestore(&ring->lock, flags);
	if (reqid < 0)
		return -ENOSPC;
	ring->requests[reqid] = msg;

	/* Do DMA mappings for the message */
	ret = flexrm_dma_map(ring->mbox->dev, msg);
	if (ret < 0) {
		/* Undo the reqid reservation on mapping failure */
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
		return ret;
	}

	/*
	 * Determine current HW BD read offset: the HW pointer is in
	 * descriptor units relative to the decoded BD start address,
	 * converted here to a byte offset from bd_dma_base.
	 */
	read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
	val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
	read_offset *= RING_DESC_SIZE;
	read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);

	/*
	 * Number required descriptors = number of non-header descriptors +
	 * number of header descriptors +
	 * 1x null descriptor
	 */
	nhcnt = flexrm_estimate_nonheader_desc_count(msg);
	count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;

	/*
	 * Check for available descriptor space: walk from the SW write
	 * offset towards the HW read offset, counting down usable slots
	 * (next-table descriptors don't count — they only chain).
	 */
	write_offset = ring->bd_write_offset;
	while (count) {
		if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
			count--;
		write_offset += RING_DESC_SIZE;
		if (write_offset == RING_BD_SIZE)
			write_offset = 0x0;	/* wrap around ring end */
		if (write_offset == read_offset)
			break;	/* caught up with HW — ring full */
	}
	if (count) {
		ret = -ENOSPC;
		exit_cleanup = true;
		goto exit;
	}

	/* Write descriptors to ring */
	next = flexrm_write_descs(msg, nhcnt, reqid,
			ring->bd_base + ring->bd_write_offset,
			RING_BD_TOGGLE_VALID(ring->bd_write_offset),
			ring->bd_base, ring->bd_base + RING_BD_SIZE);
	if (IS_ERR(next)) {
		ret = PTR_ERR(next);
		exit_cleanup = true;
		goto exit;
	}

	/* Save ring BD write offset */
	ring->bd_write_offset = (unsigned long)(next - ring->bd_base);

	/* Increment number of messages sent */
	atomic_inc_return(&ring->msg_send_count);

exit:
	/* Update error status in message */
	msg->error = ret;

	/* Cleanup if we failed: unmap and return the reqid to the bitmap */
	if (exit_cleanup) {
		flexrm_dma_unmap(ring->mbox->dev, msg);
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
	}

	return ret;
}
static void flexrm_shutdown(struct mbox_chan *chan) { u32 reqid; unsigned int timeout; struct brcm_message *msg; struct flexrm_ring *ring = chan->con_priv; /* Disable/inactivate ring */ writel_relaxed(0x0, ring->regs + RING_CONTROL); /* Set ring flush state */ timeout = 1000; /* timeout of 1s */ writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), ring->regs + RING_CONTROL); do { if (readl_relaxed(ring->regs + RING_FLUSH_DONE) & FLUSH_DONE_MASK) break; mdelay(1); } while (--timeout); if (!timeout) dev_err(ring->mbox->dev, "setting ring%d flush state timedout\n", ring->num); /* Clear ring flush state */ timeout = 1000; /* timeout of 1s */ writel_relaxed(0x0, ring + RING_CONTROL); do { if (!(readl_relaxed(ring + RING_FLUSH_DONE) & FLUSH_DONE_MASK)) break; mdelay(1); } while (--timeout); if (!timeout) dev_err(ring->mbox->dev, "clearing ring%d flush state timedout\n", ring->num); /* Abort all in-flight requests */ for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) { msg = ring->requests[reqid]; if (!msg) continue; /* Release reqid for recycling */ ring->requests[reqid] = NULL; /* Unmap DMA mappings */ flexrm_dma_unmap(ring->mbox->dev, msg); /* Give-back message to mailbox client */ msg->error = -EIO; mbox_chan_received_data(chan, msg); } /* Clear requests bitmap */ bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT); /* Release IRQ */ if (ring->irq_requested) { irq_set_affinity_hint(ring->irq, NULL); free_irq(ring->irq, ring); ring->irq_requested = false; } /* Free-up completion descriptor ring */ if (ring->cmpl_base) { dma_pool_free(ring->mbox->cmpl_pool, ring->cmpl_base, ring->cmpl_dma_base); ring->cmpl_base = NULL; } /* Free-up BD descriptor ring */ if (ring->bd_base) { dma_pool_free(ring->mbox->bd_pool, ring->bd_base, ring->bd_dma_base); ring->bd_base = NULL; } }
/*
 * Process completed requests on a FlexRM ring.
 *
 * Reads the HW completion write pointer (under the ring lock, so that
 * the SW read offset is advanced atomically), then walks the completion
 * descriptors between the old read offset and the new write offset,
 * notifying the mailbox client for each completed request.
 *
 * Returns the number of completions processed.
 */
static int flexrm_process_completions(struct flexrm_ring *ring)
{
	u64 desc;
	int err, count = 0;
	unsigned long flags;
	struct brcm_message *msg = NULL;
	u32 reqid, cmpl_read_offset, cmpl_write_offset;
	struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];

	spin_lock_irqsave(&ring->lock, flags);

	/*
	 * Get current completion read and write offset
	 *
	 * Note: We should read completion write pointer atleast once
	 * after we get a MSI interrupt because HW maintains internal
	 * MSI status which will allow next MSI interrupt only after
	 * completion write pointer is read.
	 */
	cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	cmpl_write_offset *= RING_DESC_SIZE;
	cmpl_read_offset = ring->cmpl_read_offset;
	ring->cmpl_read_offset = cmpl_write_offset;

	spin_unlock_irqrestore(&ring->lock, flags);

	/* For each completed request notify mailbox clients */
	reqid = 0;
	while (cmpl_read_offset != cmpl_write_offset) {
		/* Dequeue next completion descriptor */
		desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));

		/* Next read offset (wraps at end of completion ring) */
		cmpl_read_offset += RING_DESC_SIZE;
		if (cmpl_read_offset == RING_CMPL_SIZE)
			cmpl_read_offset = 0;

		/* Decode error from completion descriptor */
		err = flexrm_cmpl_desc_to_error(desc);
		if (err < 0) {
			dev_warn(ring->mbox->dev,
				 "ring%d got completion desc=0x%lx with error %d\n",
				 ring->num, (unsigned long)desc, err);
		}

		/* Determine request id from completion descriptor */
		reqid = flexrm_cmpl_desc_to_reqid(desc);

		/* Determine message pointer based on reqid */
		msg = ring->requests[reqid];
		if (!msg) {
			/* Stale/unknown reqid — warn and skip this descriptor */
			dev_warn(ring->mbox->dev,
				 "ring%d null msg pointer for completion desc=0x%lx\n",
				 ring->num, (unsigned long)desc);
			continue;
		}

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = err;
		mbox_chan_received_data(chan, msg);

		/* Increment number of completions processed */
		atomic_inc_return(&ring->msg_cmpl_count);
		count++;
	}

	return count;
}