static void ps2kdma_send(struct kdma_buffer *kdb, int len) { unsigned long flags; int alen; struct kdma_request *kreq = kdb->kreq; spin_lock_irqsave(&kdb->lock, flags); alen = sizeof(struct kdma_request) + DMA_ALIGN(len); kdb->top = (void *)kreq + alen; spin_unlock_irqrestore(&kdb->lock, flags); up(&kdb->sem); init_dma_request(&kreq->r, &kdma_send_ops); kreq->next = (void *)kreq + alen; kreq->kdb = kdb; kreq->qwc = len >> 4; ps2dma_add_queue((struct dma_request *)kreq, kdb->channel); if (kdb->error) { kdb->error = 0; printk("ps2dma: %s timeout\n", kdb->channel->device); } }
struct dma_addr sos_dma_malloc (void* cookie, uint32_t size, int cached) { static int alloc_cached = 0; struct dma_addr dma_mem; (void)cookie; assert(_dma_pstart); _dma_pnext = DMA_ALIGN(_dma_pnext); if (_dma_pnext < _dma_pend) { /* If caching policy has changed we round to page boundary */ if (alloc_cached != cached && PAGE_OFFSET(_dma_pnext) != 0) { _dma_pnext = ROUND_UP (_dma_pnext, seL4_PageBits); } alloc_cached = cached; /* no longer need don't need dma_fill since rootsvr does this for us * if we fault */ dma_mem.phys = (eth_paddr_t)_dma_pnext; dma_mem.virt = (eth_vaddr_t)VIRT(dma_mem.phys); _dma_pnext += size; } else { dma_mem.phys = 0; dma_mem.virt = 0; } return dma_mem; }
/*
 * Reserve between `min` and `max` bytes of payload space in the kernel
 * DMA ring buffer `kdb`.
 *
 * Returns a pointer to the payload area (just past the embedded
 * struct kdma_request header), or NULL if the caller cannot sleep and
 * the buffer semaphore is contended, or if `min` exceeds the buffer
 * size.  On success *size (if non-NULL) receives the number of bytes
 * actually available, capped at `max`, and kdb->kreq points at the
 * reserved request slot.  The caller is expected to fill the payload
 * and submit it (see ps2kdma_send), which also releases the semaphore.
 */
static void *ps2kdma_alloc(struct kdma_buffer *kdb, int min, int max, int *size)
{
	unsigned long flags;
	int free, amin;
	int poll;

	save_flags(flags);
#ifdef __mips__
	/* polling wait is used when
	 * - called from interrupt handler
	 * - interrupt is already disabled (in printk())
	 */
	poll = in_interrupt() | !(flags & ST0_IE);
#else
#error "for MIPS CPU only"
#endif
	/* Take the buffer semaphore; if we may not sleep, only try once. */
	if (down_trylock(&kdb->sem) != 0) {
		if (poll)
			return NULL; /* cannot sleep */
		else
			down(&kdb->sem);
	}
	/* Aligned payload plus the request header that precedes it. */
	amin = DMA_ALIGN(min) + sizeof(struct kdma_request);
	if (amin > kdb->size) {
		up(&kdb->sem);
		return NULL; /* requested size is too large */
	}
	spin_lock_irqsave(&kdb->lock, flags);
	/* Scan the ring buffer for a contiguous free span of at least
	 * amin bytes; if none, drop the lock and wait for in-flight DMA
	 * to complete, then rescan. */
	while (1) {
		if (kdb->top == kdb->bottom) {
			/* whole buffer is free */
			kdb->top = kdb->bottom = kdb->start;
			free = kdb->size - DMA_TRUNIT;
			break;
		}
		if (kdb->top > kdb->bottom) {
			/* [...#####...] — free space runs from top to end */
			free = kdb->end - kdb->top;
			if (amin <= free)
				break;
			if (kdb->bottom > kdb->start) {
				kdb->top = kdb->start; /* wrap around */
				continue;
			}
		} else if (kdb->top < kdb->bottom) {
			/* [###.....###] — free gap sits between top and bottom;
			 * keep DMA_TRUNIT slack so top never catches bottom */
			free = kdb->bottom - kdb->top - DMA_TRUNIT;
			if (amin <= free)
				break;
		}
		/* No room yet: release the lock and wait (polling if we
		 * may not sleep) for the channel to drain; any timeout is
		 * accumulated in kdb->error. */
		spin_unlock_irqrestore(&kdb->lock, flags);
		kdb->error |= ps2dma_intr_safe_wait_for_completion(kdb->channel, poll, &kdb->c);
		spin_lock_irqsave(&kdb->lock, flags);
	}
	/* Cap what we advertise at allocmax unless the caller's minimum
	 * already exceeds it. */
	if (amin < kdb->allocmax && free > kdb->allocmax)
		free = kdb->allocmax;
	free -= sizeof(struct kdma_request);
	if (size)
		*size = free > max ? max : free;
	kdb->kreq = (struct kdma_request *)kdb->top;
	spin_unlock_irqrestore(&kdb->lock, flags);
	/* Hand back the payload area just past the request header. */
	return (void *)kdb->kreq + sizeof(struct kdma_request);
}